| column | dtype | stats |
|---|---|---|
| problem_id | string | lengths 18–22 |
| source | string | 1 class |
| task_type | string | 1 class |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.1k–10.2k |
| golden_diff | string | lengths 151–4.94k |
| verification_info | string | lengths 582–21k |
| num_tokens | int64 | 271–2.05k |
| num_tokens_diff | int64 | 47–1.02k |
gh_patches_debug_15795 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6544 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make scheduling featured items ("Unes") easier by changing the publication date field's input type.
Today, when creating a "Une", the "Date de publication" field has to be filled in as free text in a format like "2023/08/21 10:00". That is quite unpleasant to fill in.

The suggestions shown are "Unes" I had already created.
Adding the "datetime-local" type to this input would give access to the browsers'/OS's native interfaces for this kind of input.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/featured/forms.py`
Content:
```
1 from crispy_forms.bootstrap import StrictButton
2 from crispy_forms.helper import FormHelper
3 from crispy_forms.layout import Layout, Field, ButtonHolder
4 from django import forms
5 from django.urls import reverse
6 from django.utils.translation import gettext_lazy as _
7
8 from zds.featured.models import FeaturedResource, FeaturedMessage
9
10
11 class FeaturedResourceForm(forms.ModelForm):
12 class Meta:
13 model = FeaturedResource
14
15 fields = ["title", "type", "authors", "image_url", "url"]
16
17 widgets = {
18 "title": forms.TextInput(attrs={"placeholder": _("Titre de la Une")}),
19 "type": forms.TextInput(attrs={"placeholder": _("ex: Un projet, Un article, Un tutoriel...")}),
20 "authors": forms.TextInput(attrs={"placeholder": _("Des auteurs (ou pas) ?")}),
21 "image_url": forms.URLInput(
22 attrs={"placeholder": _("Lien vers l'image de la Une (dimensions: 228x228px).")}
23 ),
24 "url": forms.URLInput(attrs={"placeholder": _("Lien vers la ressource.")}),
25 }
26
27 major_update = forms.BooleanField(
28 label=_("Mise à jour majeure (fera passer la Une en première position lors d'un changement)"),
29 initial=False,
30 required=False,
31 )
32
33 pubdate = forms.DateTimeField(
34 label=_("Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)"),
35 input_formats=[
36 "%d/%m/%Y %H:%M:%S",
37 "%Y-%m-%d %H:%M:%S", # full format with second
38 "%Y-%m-%dT%H:%M", # datetime field format
39 "%Y-%m-%d %H:%M",
40 "%d/%m/%Y %H:%M", # without second
41 "%Y-%m-%d",
42 "%d/%m/%Y", # day only
43 ],
44 widget=forms.DateTimeInput(
45 attrs={"placeholder": _("Exemple : 25/12/2016 10:00"), "type": "text"},
46 format="%d/%m/%Y %H:%M", # datetime field format
47 ),
48 )
49
50 request = forms.IntegerField(widget=forms.HiddenInput(), required=False)
51
52 def __init__(self, *args, **kwargs):
53 hide_major_update_field = kwargs.pop("hide_major_update_field", False)
54
55 super().__init__(*args, **kwargs)
56 self.helper = FormHelper()
57 self.helper.form_class = "content-wrapper"
58 self.helper.form_method = "post"
59 self.helper.form_action = reverse("featured:resource-create")
60
61 fields = [Field("request"), Field("title"), Field("type"), Field("authors"), Field("image_url"), Field("url")]
62
63 if not hide_major_update_field:
64 fields.append(Field("major_update"))
65
66 fields.extend(
67 [
68 Field("pubdate"),
69 ButtonHolder(
70 StrictButton(_("Enregistrer"), type="submit"),
71 ),
72 ]
73 )
74
75 self.helper.layout = Layout(*fields)
76
77
78 class FeaturedMessageForm(forms.ModelForm):
79 class Meta:
80 model = FeaturedMessage
81
82 fields = ["hook", "message", "url"]
83
84 widgets = {
85 "hook": forms.TextInput(attrs={"placeholder": _('Mot d\'accroche court ("Nouveau !")')}),
86 "message": forms.TextInput(attrs={"placeholder": _("Message à afficher")}),
87 "url": forms.URLInput(attrs={"placeholder": _("Lien vers la description de la ressource")}),
88 }
89
90 def __init__(self, *args, **kwargs):
91 super().__init__(*args, **kwargs)
92 self.helper = FormHelper()
93 self.helper.form_class = "content-wrapper"
94 self.helper.form_method = "post"
95 self.helper.form_action = reverse("featured:message-create")
96
97 self.helper.layout = Layout(
98 Field("hook"),
99 Field("message"),
100 Field("url"),
101 ButtonHolder(
102 StrictButton(_("Enregistrer"), type="submit"),
103 ),
104 )
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/featured/forms.py b/zds/featured/forms.py
--- a/zds/featured/forms.py
+++ b/zds/featured/forms.py
@@ -31,20 +31,8 @@
)
pubdate = forms.DateTimeField(
- label=_("Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)"),
- input_formats=[
- "%d/%m/%Y %H:%M:%S",
- "%Y-%m-%d %H:%M:%S", # full format with second
- "%Y-%m-%dT%H:%M", # datetime field format
- "%Y-%m-%d %H:%M",
- "%d/%m/%Y %H:%M", # without second
- "%Y-%m-%d",
- "%d/%m/%Y", # day only
- ],
- widget=forms.DateTimeInput(
- attrs={"placeholder": _("Exemple : 25/12/2016 10:00"), "type": "text"},
- format="%d/%m/%Y %H:%M", # datetime field format
- ),
+ label=_("Date de publication (exemple: 25/12/2015 15:00)"),
+ widget=forms.DateTimeInput(attrs={"type": "datetime-local"}),
)
request = forms.IntegerField(widget=forms.HiddenInput(), required=False)
|
{"golden_diff": "diff --git a/zds/featured/forms.py b/zds/featured/forms.py\n--- a/zds/featured/forms.py\n+++ b/zds/featured/forms.py\n@@ -31,20 +31,8 @@\n )\n \n pubdate = forms.DateTimeField(\n- label=_(\"Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)\"),\n- input_formats=[\n- \"%d/%m/%Y %H:%M:%S\",\n- \"%Y-%m-%d %H:%M:%S\", # full format with second\n- \"%Y-%m-%dT%H:%M\", # datetime field format\n- \"%Y-%m-%d %H:%M\",\n- \"%d/%m/%Y %H:%M\", # without second\n- \"%Y-%m-%d\",\n- \"%d/%m/%Y\", # day only\n- ],\n- widget=forms.DateTimeInput(\n- attrs={\"placeholder\": _(\"Exemple : 25/12/2016 10:00\"), \"type\": \"text\"},\n- format=\"%d/%m/%Y %H:%M\", # datetime field format\n- ),\n+ label=_(\"Date de publication (exemple: 25/12/2015 15:00)\"),\n+ widget=forms.DateTimeInput(attrs={\"type\": \"datetime-local\"}),\n )\n \n request = forms.IntegerField(widget=forms.HiddenInput(), required=False)\n", "issue": "Faciliter la programmation des Unes en modifiant le type de la date de publication.\n\u00c0 ce jour, quand on cr\u00e9er une Unes il faut remplir le champ \"Date de publication\" avec un format texte de style \"2023/08/21 10:00\". C'est assez d\u00e9sagr\u00e9able \u00e0 remplir. \r\n\r\n\r\nLes propositions sont des Unes que j'ai d\u00e9j\u00e0 faite.\r\n\r\nEn ajoutant le type \"datetime-local\" \u00e0 cette input on pourrait acc\u00e8der aux interfaces natives des navigateurs/OS pour ce genre d'input.\r\n\r\n\n", "before_files": [{"content": "from crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field, ButtonHolder\nfrom django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\n\n\nclass FeaturedResourceForm(forms.ModelForm):\n class Meta:\n model = FeaturedResource\n\n fields = [\"title\", \"type\", \"authors\", \"image_url\", \"url\"]\n\n widgets = {\n \"title\": forms.TextInput(attrs={\"placeholder\": _(\"Titre de la Une\")}),\n \"type\": forms.TextInput(attrs={\"placeholder\": _(\"ex: Un projet, Un article, Un tutoriel...\")}),\n \"authors\": forms.TextInput(attrs={\"placeholder\": _(\"Des auteurs (ou pas)\u00a0?\")}),\n \"image_url\": forms.URLInput(\n attrs={\"placeholder\": _(\"Lien vers l'image de la Une (dimensions: 228x228px).\")}\n ),\n \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la ressource.\")}),\n }\n\n major_update = forms.BooleanField(\n label=_(\"Mise \u00e0 jour majeure (fera passer la Une en premi\u00e8re position lors d'un changement)\"),\n initial=False,\n required=False,\n )\n\n pubdate = forms.DateTimeField(\n label=_(\"Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)\"),\n input_formats=[\n \"%d/%m/%Y %H:%M:%S\",\n \"%Y-%m-%d %H:%M:%S\", # full format with second\n \"%Y-%m-%dT%H:%M\", # datetime field format\n \"%Y-%m-%d %H:%M\",\n \"%d/%m/%Y %H:%M\", # without second\n \"%Y-%m-%d\",\n \"%d/%m/%Y\", # day only\n ],\n widget=forms.DateTimeInput(\n attrs={\"placeholder\": _(\"Exemple : 25/12/2016 10:00\"), \"type\": \"text\"},\n format=\"%d/%m/%Y %H:%M\", # datetime field format\n ),\n )\n\n request = forms.IntegerField(widget=forms.HiddenInput(), required=False)\n\n def __init__(self, *args, **kwargs):\n hide_major_update_field = kwargs.pop(\"hide_major_update_field\", False)\n\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = \"content-wrapper\"\n 
self.helper.form_method = \"post\"\n self.helper.form_action = reverse(\"featured:resource-create\")\n\n fields = [Field(\"request\"), Field(\"title\"), Field(\"type\"), Field(\"authors\"), Field(\"image_url\"), Field(\"url\")]\n\n if not hide_major_update_field:\n fields.append(Field(\"major_update\"))\n\n fields.extend(\n [\n Field(\"pubdate\"),\n ButtonHolder(\n StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n ),\n ]\n )\n\n self.helper.layout = Layout(*fields)\n\n\nclass FeaturedMessageForm(forms.ModelForm):\n class Meta:\n model = FeaturedMessage\n\n fields = [\"hook\", \"message\", \"url\"]\n\n widgets = {\n \"hook\": forms.TextInput(attrs={\"placeholder\": _('Mot d\\'accroche court (\"Nouveau\u00a0!\")')}),\n \"message\": forms.TextInput(attrs={\"placeholder\": _(\"Message \u00e0 afficher\")}),\n \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la description de la ressource\")}),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = \"content-wrapper\"\n self.helper.form_method = \"post\"\n self.helper.form_action = reverse(\"featured:message-create\")\n\n self.helper.layout = Layout(\n Field(\"hook\"),\n Field(\"message\"),\n Field(\"url\"),\n ButtonHolder(\n StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n ),\n )\n", "path": "zds/featured/forms.py"}], "after_files": [{"content": "from crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field, ButtonHolder\nfrom django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\n\n\nclass FeaturedResourceForm(forms.ModelForm):\n class Meta:\n model = FeaturedResource\n\n fields = [\"title\", \"type\", \"authors\", \"image_url\", \"url\"]\n\n widgets = {\n \"title\": forms.TextInput(attrs={\"placeholder\": _(\"Titre de la Une\")}),\n \"type\": forms.TextInput(attrs={\"placeholder\": _(\"ex: Un projet, Un article, Un tutoriel...\")}),\n \"authors\": forms.TextInput(attrs={\"placeholder\": _(\"Des auteurs (ou pas)\u00a0?\")}),\n \"image_url\": forms.URLInput(\n attrs={\"placeholder\": _(\"Lien vers l'image de la Une (dimensions: 228x228px).\")}\n ),\n \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la ressource.\")}),\n }\n\n major_update = forms.BooleanField(\n label=_(\"Mise \u00e0 jour majeure (fera passer la Une en premi\u00e8re position lors d'un changement)\"),\n initial=False,\n required=False,\n )\n\n pubdate = forms.DateTimeField(\n label=_(\"Date de publication (exemple: 25/12/2015 15:00)\"),\n widget=forms.DateTimeInput(attrs={\"type\": \"datetime-local\"}),\n )\n\n request = forms.IntegerField(widget=forms.HiddenInput(), required=False)\n\n def __init__(self, *args, **kwargs):\n hide_major_update_field = kwargs.pop(\"hide_major_update_field\", False)\n\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = \"content-wrapper\"\n self.helper.form_method = \"post\"\n self.helper.form_action = reverse(\"featured:resource-create\")\n\n fields = [Field(\"request\"), Field(\"title\"), Field(\"type\"), Field(\"authors\"), Field(\"image_url\"), Field(\"url\")]\n\n if not hide_major_update_field:\n fields.append(Field(\"major_update\"))\n\n fields.extend(\n [\n Field(\"pubdate\"),\n ButtonHolder(\n StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n ),\n ]\n )\n\n self.helper.layout = 
Layout(*fields)\n\n\nclass FeaturedMessageForm(forms.ModelForm):\n class Meta:\n model = FeaturedMessage\n\n fields = [\"hook\", \"message\", \"url\"]\n\n widgets = {\n \"hook\": forms.TextInput(attrs={\"placeholder\": _('Mot d\\'accroche court (\"Nouveau\u00a0!\")')}),\n \"message\": forms.TextInput(attrs={\"placeholder\": _(\"Message \u00e0 afficher\")}),\n \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la description de la ressource\")}),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = \"content-wrapper\"\n self.helper.form_method = \"post\"\n self.helper.form_action = reverse(\"featured:message-create\")\n\n self.helper.layout = Layout(\n Field(\"hook\"),\n Field(\"message\"),\n Field(\"url\"),\n ButtonHolder(\n StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n ),\n )\n", "path": "zds/featured/forms.py"}]}
| 1,605 | 346 |
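The patch above works because a browser submits `datetime-local` values in the ISO `%Y-%m-%dT%H:%M` form, which Django's `DateTimeField` parses out of the box on Django ≥ 3.1. A minimal standalone sketch of that behavior; the form and field names here are illustrative, not taken from the zds codebase:

```python
# Standalone sketch: a DateTimeField behind an <input type="datetime-local">.
import django
from django.conf import settings

settings.configure(USE_TZ=False)  # bare-bones setup for a one-off script
django.setup()

from django import forms

class PubdateForm(forms.Form):
    pubdate = forms.DateTimeField(
        widget=forms.DateTimeInput(attrs={"type": "datetime-local"}),
    )

# Browsers submit datetime-local values as "YYYY-MM-DDTHH:MM".
form = PubdateForm(data={"pubdate": "2023-08-21T10:00"})
assert form.is_valid(), form.errors
print(form.cleaned_data["pubdate"])  # 2023-08-21 10:00:00
```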
gh_patches_debug_8191 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10361 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
missing_number algorithm doesn't work as intended (bit_manipulation/missing_number.py)
### Repository commit
d0c54acd75cedf14cff353869482a0487fea1697
### Python version (python --version)
Python 3.12.0
### Dependencies version (pip freeze)
setuptools==68.2.2
wheel==0.41.2
### Expected behavior
for array [1,3,4,5,6] the output should be 2
### Actual behavior
the output got is 4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bit_manipulation/missing_number.py`
Content:
```
1 def find_missing_number(nums: list[int]) -> int:
2 """
3 Finds the missing number in a list of consecutive integers.
4
5 Args:
6 nums: A list of integers.
7
8 Returns:
9 The missing number.
10
11 Example:
12 >>> find_missing_number([0, 1, 3, 4])
13 2
14 """
15 n = len(nums)
16 missing_number = n
17
18 for i in range(n):
19 missing_number ^= i ^ nums[i]
20
21 return missing_number
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,11 +11,18 @@
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([1, 3, 4, 5, 6])
+ 2
+ >>> find_missing_number([6, 5, 4, 2, 1])
+ 3
+ >>> find_missing_number([6, 1, 5, 3, 4])
+ 2
"""
- n = len(nums)
- missing_number = n
+ low = min(nums)
+ high = max(nums)
+ missing_number = high
- for i in range(n):
- missing_number ^= i ^ nums[i]
+ for i in range(low, high):
+ missing_number ^= i ^ nums[i - low]
return missing_number
|
{"golden_diff": "diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py\n--- a/bit_manipulation/missing_number.py\n+++ b/bit_manipulation/missing_number.py\n@@ -11,11 +11,18 @@\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n+ >>> find_missing_number([1, 3, 4, 5, 6])\n+ 2\n+ >>> find_missing_number([6, 5, 4, 2, 1])\n+ 3\n+ >>> find_missing_number([6, 1, 5, 3, 4])\n+ 2\n \"\"\"\n- n = len(nums)\n- missing_number = n\n+ low = min(nums)\n+ high = max(nums)\n+ missing_number = high\n \n- for i in range(n):\n- missing_number ^= i ^ nums[i]\n+ for i in range(low, high):\n+ missing_number ^= i ^ nums[i - low]\n \n return missing_number\n", "issue": "missing_number algorithm dosen't work as intended (bit_manipulation/missing_number.py)\n### Repository commit\n\nd0c54acd75cedf14cff353869482a0487fea1697\n\n### Python version (python --version)\n\nPython 3.12.0\n\n### Dependencies version (pip freeze)\n\nsetuptools==68.2.2\r\nwheel==0.41.2\n\n### Expected behavior\n\nfor array [1,3,4,5,6] the output should be 2\n\n### Actual behavior\n\nthe output got is 4\n", "before_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n \"\"\"\n n = len(nums)\n missing_number = n\n\n for i in range(n):\n missing_number ^= i ^ nums[i]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}], "after_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n 3\n >>> find_missing_number([6, 1, 5, 3, 4])\n 2\n \"\"\"\n low = min(nums)\n high = max(nums)\n missing_number = high\n\n for i in range(low, high):\n missing_number ^= i ^ nums[i - low]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}]}
| 540 | 243 |
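A quick standalone check of the patched algorithm (logic copied from the diff above, not imported from the repository): anchoring the XOR fold at `min(nums)` handles ranges that do not start at 0, and because XOR is order-independent the input need not be sorted.

```python
def find_missing_number(nums: list[int]) -> int:
    # XOR over the full value range and over the array; every value that is
    # present cancels itself, leaving only the missing one.
    low, high = min(nums), max(nums)
    missing = high
    for i in range(low, high):
        missing ^= i ^ nums[i - low]
    return missing

assert find_missing_number([0, 1, 3, 4]) == 2
assert find_missing_number([1, 3, 4, 5, 6]) == 2  # the case from the issue
assert find_missing_number([6, 5, 4, 2, 1]) == 3  # unsorted input
```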
gh_patches_debug_15727 | rasdani/github-patches | git_diff | crytic__slither-561 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AssertionError when obtaining address of library
```solidity
library UnsafeMath {
function add(uint a, uint b) external returns (uint) {
return a + b;
}
}
contract Test {
function getUnsafeMathAddr() public view returns (address) {
return address(UnsafeMath);
}
}
```
https://solidity.readthedocs.io/en/latest/contracts.html#libraries:~:text=It%20is%20possible%20to%20obtain%20the%20address%20of%20a%20library
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/slithir/operations/type_conversion.py`
Content:
```
1 from slither.core.solidity_types.type import Type
2 from slither.slithir.operations.lvalue import OperationWithLValue
3 from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue
4
5
6 class TypeConversion(OperationWithLValue):
7
8 def __init__(self, result, variable, variable_type):
9 super().__init__()
10 assert is_valid_rvalue(variable)
11 assert is_valid_lvalue(result)
12 assert isinstance(variable_type, Type)
13
14 self._variable = variable
15 self._type = variable_type
16 self._lvalue = result
17
18
19 @property
20 def variable(self):
21 return self._variable
22
23 @property
24 def type(self):
25 return self._type
26
27 @property
28 def read(self):
29 return [self.variable]
30
31 def __str__(self):
32 return str(self.lvalue) +' = CONVERT {} to {}'.format(self.variable, self.type)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/slithir/operations/type_conversion.py b/slither/slithir/operations/type_conversion.py
--- a/slither/slithir/operations/type_conversion.py
+++ b/slither/slithir/operations/type_conversion.py
@@ -1,3 +1,4 @@
+from slither.core.declarations import Contract
from slither.core.solidity_types.type import Type
from slither.slithir.operations.lvalue import OperationWithLValue
from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue
@@ -7,7 +8,7 @@
def __init__(self, result, variable, variable_type):
super().__init__()
- assert is_valid_rvalue(variable)
+ assert is_valid_rvalue(variable) or isinstance(variable, Contract)
assert is_valid_lvalue(result)
assert isinstance(variable_type, Type)
|
{"golden_diff": "diff --git a/slither/slithir/operations/type_conversion.py b/slither/slithir/operations/type_conversion.py\n--- a/slither/slithir/operations/type_conversion.py\n+++ b/slither/slithir/operations/type_conversion.py\n@@ -1,3 +1,4 @@\n+from slither.core.declarations import Contract\n from slither.core.solidity_types.type import Type\n from slither.slithir.operations.lvalue import OperationWithLValue\n from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\n@@ -7,7 +8,7 @@\n \n def __init__(self, result, variable, variable_type):\n super().__init__()\n- assert is_valid_rvalue(variable)\n+ assert is_valid_rvalue(variable) or isinstance(variable, Contract)\n assert is_valid_lvalue(result)\n assert isinstance(variable_type, Type)\n", "issue": "AssertionError when obtaining address of library\n```solidity\r\nlibrary UnsafeMath {\r\n function add(uint a, uint b) external returns (uint) {\r\n return a + b;\r\n }\r\n}\r\n\r\ncontract Test {\r\n function getUnsafeMathAddr() public view returns (address) {\r\n return address(UnsafeMath);\r\n }\r\n}\r\n```\r\n\r\nhttps://solidity.readthedocs.io/en/latest/contracts.html#libraries:~:text=It%20is%20possible%20to%20obtain%20the%20address%20of%20a%20library\n", "before_files": [{"content": "from slither.core.solidity_types.type import Type\nfrom slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\n\n\nclass TypeConversion(OperationWithLValue):\n\n def __init__(self, result, variable, variable_type):\n super().__init__()\n assert is_valid_rvalue(variable)\n assert is_valid_lvalue(result)\n assert isinstance(variable_type, Type)\n\n self._variable = variable\n self._type = variable_type\n self._lvalue = result\n \n\n @property\n def variable(self):\n return self._variable\n\n @property\n def type(self):\n return self._type\n\n @property\n def read(self):\n return [self.variable]\n\n def __str__(self):\n return str(self.lvalue) +' = CONVERT {} to {}'.format(self.variable, self.type)\n", "path": "slither/slithir/operations/type_conversion.py"}], "after_files": [{"content": "from slither.core.declarations import Contract\nfrom slither.core.solidity_types.type import Type\nfrom slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\n\n\nclass TypeConversion(OperationWithLValue):\n\n def __init__(self, result, variable, variable_type):\n super().__init__()\n assert is_valid_rvalue(variable) or isinstance(variable, Contract)\n assert is_valid_lvalue(result)\n assert isinstance(variable_type, Type)\n\n self._variable = variable\n self._type = variable_type\n self._lvalue = result\n \n\n @property\n def variable(self):\n return self._variable\n\n @property\n def type(self):\n return self._type\n\n @property\n def read(self):\n return [self.variable]\n\n def __str__(self):\n return str(self.lvalue) +' = CONVERT {} to {}'.format(self.variable, self.type)\n", "path": "slither/slithir/operations/type_conversion.py"}]}
| 650 | 188 |
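A hypothetical reproduction script (it assumes `slither-analyzer` and a compatible `solc` are installed, and that the issue's Solidity snippet is saved as `test.sol`); before the patch, parsing `address(UnsafeMath)` tripped the `is_valid_rvalue` assertion in `TypeConversion`:

```python
# Hypothetical repro: parsing a contract that takes a library's address.
from slither.slither import Slither

slither = Slither("test.sol")  # raised AssertionError before the patch
for contract in slither.contracts:
    print(contract.name, [f.name for f in contract.functions])
```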
gh_patches_debug_22747 | rasdani/github-patches | git_diff | scrapy__scrapy-2275 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Idea: warn users when trying to use TextResponse functionality with plain Response
Currently, if we try to use TextResponse functionality such as response.text or the css()/xpath() methods with a plain Response (e.g., in the case of binary content), we get an AttributeError:
```
>>> response.css
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-7d6e256164d4> in <module>()
----> 1 response.css
AttributeError: 'Response' object has no attribute 'css'
>>> response.xpath
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-4f61f6e9fc6e> in <module>()
----> 1 response.xpath
AttributeError: 'Response' object has no attribute 'xpath'
>>> response.text
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-be6a4a00df5e> in <module>()
----> 1 response.text
AttributeError: 'Response' object has no attribute 'text'
```
Would it make sense to add a few methods/properties to explain what's going on for new users?
I was thinking instead of AttributeError, a better behavior could be a ValueError with a message giving a bit more context.
So, in plain `Response`, we could have:
```
def css(self, *args, **kw):
raise ValueError('Response content is not text')
def xpath(self, *args, **kw):
raise ValueError('Response content is not text')
@property
def text(self, *args, **kw):
raise ValueError('Response content is not text')
```
This would be nice, because we'd have to explain fewer things when teaching people about responses and also about using the `.css` and `.xpath` methods.
What do you think?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/http/response/__init__.py`
Content:
```
1 """
2 This module implements the Response class which is used to represent HTTP
3 responses in Scrapy.
4
5 See documentation in docs/topics/request-response.rst
6 """
7 from six.moves.urllib.parse import urljoin
8
9 from scrapy.http.headers import Headers
10 from scrapy.utils.trackref import object_ref
11 from scrapy.http.common import obsolete_setter
12
13 class Response(object_ref):
14
15 def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):
16 self.headers = Headers(headers or {})
17 self.status = int(status)
18 self._set_body(body)
19 self._set_url(url)
20 self.request = request
21 self.flags = [] if flags is None else list(flags)
22
23 @property
24 def meta(self):
25 try:
26 return self.request.meta
27 except AttributeError:
28 raise AttributeError(
29 "Response.meta not available, this response "
30 "is not tied to any request"
31 )
32
33 def _get_url(self):
34 return self._url
35
36 def _set_url(self, url):
37 if isinstance(url, str):
38 self._url = url
39 else:
40 raise TypeError('%s url must be str, got %s:' % (type(self).__name__,
41 type(url).__name__))
42
43 url = property(_get_url, obsolete_setter(_set_url, 'url'))
44
45 def _get_body(self):
46 return self._body
47
48 def _set_body(self, body):
49 if body is None:
50 self._body = b''
51 elif not isinstance(body, bytes):
52 raise TypeError(
53 "Response body must be bytes. "
54 "If you want to pass unicode body use TextResponse "
55 "or HtmlResponse.")
56 else:
57 self._body = body
58
59 body = property(_get_body, obsolete_setter(_set_body, 'body'))
60
61 def __str__(self):
62 return "<%d %s>" % (self.status, self.url)
63
64 __repr__ = __str__
65
66 def copy(self):
67 """Return a copy of this Response"""
68 return self.replace()
69
70 def replace(self, *args, **kwargs):
71 """Create a new Response with the same attributes except for those
72 given new values.
73 """
74 for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
75 kwargs.setdefault(x, getattr(self, x))
76 cls = kwargs.pop('cls', self.__class__)
77 return cls(*args, **kwargs)
78
79 def urljoin(self, url):
80 """Join this Response's url with a possible relative url to form an
81 absolute interpretation of the latter."""
82 return urljoin(self.url, url)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/http/response/__init__.py b/scrapy/http/response/__init__.py
--- a/scrapy/http/response/__init__.py
+++ b/scrapy/http/response/__init__.py
@@ -9,6 +9,8 @@
from scrapy.http.headers import Headers
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
+from scrapy.exceptions import NotSupported
+
class Response(object_ref):
@@ -80,3 +82,22 @@
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
+
+ @property
+ def text(self):
+ """For subclasses of TextResponse, this will return the body
+ as text (unicode object in Python 2 and str in Python 3)
+ """
+ raise AttributeError("Response content isn't text")
+
+ def css(self, *a, **kw):
+ """Shortcut method implemented only by responses whose content
+ is text (subclasses of TextResponse).
+ """
+ raise NotSupported("Response content isn't text")
+
+ def xpath(self, *a, **kw):
+ """Shortcut method implemented only by responses whose content
+ is text (subclasses of TextResponse).
+ """
+ raise NotSupported("Response content isn't text")
|
{"golden_diff": "diff --git a/scrapy/http/response/__init__.py b/scrapy/http/response/__init__.py\n--- a/scrapy/http/response/__init__.py\n+++ b/scrapy/http/response/__init__.py\n@@ -9,6 +9,8 @@\n from scrapy.http.headers import Headers\n from scrapy.utils.trackref import object_ref\n from scrapy.http.common import obsolete_setter\n+from scrapy.exceptions import NotSupported\n+\n \n class Response(object_ref):\n \n@@ -80,3 +82,22 @@\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(self.url, url)\n+\n+ @property\n+ def text(self):\n+ \"\"\"For subclasses of TextResponse, this will return the body\n+ as text (unicode object in Python 2 and str in Python 3)\n+ \"\"\"\n+ raise AttributeError(\"Response content isn't text\")\n+\n+ def css(self, *a, **kw):\n+ \"\"\"Shortcut method implemented only by responses whose content\n+ is text (subclasses of TextResponse).\n+ \"\"\"\n+ raise NotSupported(\"Response content isn't text\")\n+\n+ def xpath(self, *a, **kw):\n+ \"\"\"Shortcut method implemented only by responses whose content\n+ is text (subclasses of TextResponse).\n+ \"\"\"\n+ raise NotSupported(\"Response content isn't text\")\n", "issue": "Idea: warn users when trying to use TextResponse functionality with plain Response\nCurrently, if we try to use TextResponse functionality like response.text or css()/xpath() methods with a plain Response (e.g. in case of binary content), we get an AttributeError:\n\n```\n>>> response.css\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-1-7d6e256164d4> in <module>()\n----> 1 response.css\n\nAttributeError: 'Response' object has no attribute 'css'\n>>> response.xpath\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-2-4f61f6e9fc6e> in <module>()\n----> 1 response.xpath\n\nAttributeError: 'Response' object has no attribute 'xpath'\n>>> response.text\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-3-be6a4a00df5e> in <module>()\n----> 1 response.text\n\nAttributeError: 'Response' object has no attribute 'text'\n```\n\nWould it make sense to add a few methods/properties to explain what's going on for new users?\n\nI was thinking instead of AttributeError, a better behavior could be a ValueError with a message giving a bit more context.\n\nSo, in plain `Response`, we could have:\n\n```\ndef css(self, *args, **kw):\n raise ValueError('Response content is not text')\n\ndef xpath(self, *args, **kw):\n raise ValueError('Response content is not text')\n\n@property\ndef text(self, *args, **kw):\n raise ValueError('Response content is not text')\n```\n\nThis would be nice, because we'd had to explain fewer things when teaching people about responses and also about using `.css` and `.xpath` methods.\n\nWhat do you think?\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the Response class which is used to represent HTTP\nresponses in Scrapy.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\nfrom six.moves.urllib.parse import urljoin\n\nfrom scrapy.http.headers import Headers\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.http.common import obsolete_setter\n\nclass Response(object_ref):\n\n def __init__(self, url, status=200, headers=None, body=b'', flags=None, 
request=None):\n self.headers = Headers(headers or {})\n self.status = int(status)\n self._set_body(body)\n self._set_url(url)\n self.request = request\n self.flags = [] if flags is None else list(flags)\n\n @property\n def meta(self):\n try:\n return self.request.meta\n except AttributeError:\n raise AttributeError(\n \"Response.meta not available, this response \"\n \"is not tied to any request\"\n )\n\n def _get_url(self):\n return self._url\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = url\n else:\n raise TypeError('%s url must be str, got %s:' % (type(self).__name__,\n type(url).__name__))\n\n url = property(_get_url, obsolete_setter(_set_url, 'url'))\n\n def _get_body(self):\n return self._body\n\n def _set_body(self, body):\n if body is None:\n self._body = b''\n elif not isinstance(body, bytes):\n raise TypeError(\n \"Response body must be bytes. \"\n \"If you want to pass unicode body use TextResponse \"\n \"or HtmlResponse.\")\n else:\n self._body = body\n\n body = property(_get_body, obsolete_setter(_set_body, 'body'))\n\n def __str__(self):\n return \"<%d %s>\" % (self.status, self.url)\n\n __repr__ = __str__\n\n def copy(self):\n \"\"\"Return a copy of this Response\"\"\"\n return self.replace()\n\n def replace(self, *args, **kwargs):\n \"\"\"Create a new Response with the same attributes except for those\n given new values.\n \"\"\"\n for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:\n kwargs.setdefault(x, getattr(self, x))\n cls = kwargs.pop('cls', self.__class__)\n return cls(*args, **kwargs)\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(self.url, url)\n", "path": "scrapy/http/response/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis module implements the Response class which is used to represent HTTP\nresponses in Scrapy.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\nfrom six.moves.urllib.parse import urljoin\n\nfrom scrapy.http.headers import Headers\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.http.common import obsolete_setter\nfrom scrapy.exceptions import NotSupported\n\n\nclass Response(object_ref):\n\n def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):\n self.headers = Headers(headers or {})\n self.status = int(status)\n self._set_body(body)\n self._set_url(url)\n self.request = request\n self.flags = [] if flags is None else list(flags)\n\n @property\n def meta(self):\n try:\n return self.request.meta\n except AttributeError:\n raise AttributeError(\n \"Response.meta not available, this response \"\n \"is not tied to any request\"\n )\n\n def _get_url(self):\n return self._url\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = url\n else:\n raise TypeError('%s url must be str, got %s:' % (type(self).__name__,\n type(url).__name__))\n\n url = property(_get_url, obsolete_setter(_set_url, 'url'))\n\n def _get_body(self):\n return self._body\n\n def _set_body(self, body):\n if body is None:\n self._body = b''\n elif not isinstance(body, bytes):\n raise TypeError(\n \"Response body must be bytes. 
\"\n \"If you want to pass unicode body use TextResponse \"\n \"or HtmlResponse.\")\n else:\n self._body = body\n\n body = property(_get_body, obsolete_setter(_set_body, 'body'))\n\n def __str__(self):\n return \"<%d %s>\" % (self.status, self.url)\n\n __repr__ = __str__\n\n def copy(self):\n \"\"\"Return a copy of this Response\"\"\"\n return self.replace()\n\n def replace(self, *args, **kwargs):\n \"\"\"Create a new Response with the same attributes except for those\n given new values.\n \"\"\"\n for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:\n kwargs.setdefault(x, getattr(self, x))\n cls = kwargs.pop('cls', self.__class__)\n return cls(*args, **kwargs)\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(self.url, url)\n\n @property\n def text(self):\n \"\"\"For subclasses of TextResponse, this will return the body\n as text (unicode object in Python 2 and str in Python 3)\n \"\"\"\n raise AttributeError(\"Response content isn't text\")\n\n def css(self, *a, **kw):\n \"\"\"Shortcut method implemented only by responses whose content\n is text (subclasses of TextResponse).\n \"\"\"\n raise NotSupported(\"Response content isn't text\")\n\n def xpath(self, *a, **kw):\n \"\"\"Shortcut method implemented only by responses whose content\n is text (subclasses of TextResponse).\n \"\"\"\n raise NotSupported(\"Response content isn't text\")\n", "path": "scrapy/http/response/__init__.py"}]}
| 1,383 | 301 |
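A short sketch of the behavior the patch introduces (assuming a Scrapy release that includes it): on a plain `Response`, `css()`/`xpath()` now raise `NotSupported` with a readable message, while `.text` raises an `AttributeError` carrying the same message.

```python
from scrapy.http import Response
from scrapy.exceptions import NotSupported

# A binary body: Scrapy would build a plain Response, not a TextResponse.
response = Response(url="http://www.example.com", body=b"\x89PNG\r\n")

try:
    response.xpath("//title/text()")
except NotSupported as exc:
    print(exc)  # Response content isn't text

try:
    response.text
except AttributeError as exc:
    print(exc)  # Response content isn't text
```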
gh_patches_debug_66309 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1463 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No module named 'elasticdl.python.elasticdl.layers' on master
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/elasticdl/python/master/main.py", line 28, in <module>
from elasticdl.python.elasticdl.layers.embedding import Embedding
ModuleNotFoundError: No module named 'elasticdl.python.elasticdl.layers'
```
It seems the `layers` directory is not installed to `/usr/local/lib/python3.7/site-packages/elasticdl-develop-py3.7.egg/elasticdl/python/elasticdl` after running `python setup.py install`.
Steps to reproduce:
1. In a Python Docker container, clone ElasticDL and run `python setup.py install`
1. remove the cloned source
1. execute a demo job by: `elasticdl train ...`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/python/elasticdl/__init__.py b/elasticdl/python/elasticdl/__init__.py
--- a/elasticdl/python/elasticdl/__init__.py
+++ b/elasticdl/python/elasticdl/__init__.py
@@ -0,0 +1 @@
+from elasticdl.python.elasticdl import layers # noqa: F401
|
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/__init__.py b/elasticdl/python/elasticdl/__init__.py\n--- a/elasticdl/python/elasticdl/__init__.py\n+++ b/elasticdl/python/elasticdl/__init__.py\n@@ -0,0 +1 @@\n+from elasticdl.python.elasticdl import layers # noqa: F401\n", "issue": "No module named 'elasticdl.python.elasticdl.layers' on master\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/elasticdl/python/master/main.py\", line 28, in <module>\r\n from elasticdl.python.elasticdl.layers.embedding import Embedding\r\nModuleNotFoundError: No module named 'elasticdl.python.elasticdl.layers'\r\n```\r\n\r\nSeems `layers` directory is not installed to `/usr/local/lib/python3.7/site-packages/elasticdl-develop-py3.7.egg/elasticdl/python/elasticdl` after running `python setup.py install`\r\n\r\nSteps to reproduce:\r\n\r\n1. In a Python Docker container, clone ElasticDL and run `python setup.py install`\r\n1. remove the cloned source\r\n1. execute a demo job by: `elasticdl train ...`\n", "before_files": [{"content": "", "path": "elasticdl/python/elasticdl/__init__.py"}], "after_files": [{"content": "from elasticdl.python.elasticdl import layers # noqa: F401\n", "path": "elasticdl/python/elasticdl/__init__.py"}]}
| 496 | 83 |
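One plausible reading of the one-line fix: an explicit `from elasticdl.python.elasticdl import layers` in `__init__.py` forces the subpackage to load (and therefore to ship) together with its parent, since in Python a submodule only becomes an attribute of its parent package once something imports it. A generic stdlib illustration of that attribute behavior, using `xml` as a stand-in parent (run in a fresh interpreter):

```python
import importlib

pkg = importlib.import_module("xml")  # stand-in for the parent package
print(hasattr(pkg, "dom"))            # False: submodule not loaded yet
importlib.import_module("xml.dom")
print(hasattr(pkg, "dom"))            # True once explicitly imported
```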
gh_patches_debug_30901 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-677 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto add builtin provider to Generic based on passed locale
# Feature request
An idea is very simple:
```python
generic = Generic('ru', auto_add_builtin=True)
generic.russia_provider.inn()
```
Instead of this:
```python
from mimesis import Generic
from mimesis.builtins import RussiaSpecProvider
generic = Generic('ru')
generic.add_provider(RussiaSpecProvider)
generic.russia_provider.inn()
```
Optionally, we could make the builtin provider's name customizable:
```python
generic = Generic('ru', auto_add_builtin=True, builtin_custom_name='russia')
generic.russia.inn()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/generic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Provides all at one."""
4
5 import inspect
6 from typing import Any, List, Type
7
8 from mimesis.providers.address import Address
9 from mimesis.providers.base import BaseDataProvider, BaseProvider
10 from mimesis.providers.business import Business
11 from mimesis.providers.choice import Choice
12 from mimesis.providers.clothing import Clothing
13 from mimesis.providers.code import Code
14 from mimesis.providers.cryptographic import Cryptographic
15 from mimesis.providers.date import Datetime
16 from mimesis.providers.development import Development
17 from mimesis.providers.file import File
18 from mimesis.providers.food import Food
19 from mimesis.providers.hardware import Hardware
20 from mimesis.providers.internet import Internet
21 from mimesis.providers.numbers import Numbers
22 from mimesis.providers.path import Path
23 from mimesis.providers.payment import Payment
24 from mimesis.providers.person import Person
25 from mimesis.providers.science import Science
26 from mimesis.providers.structure import Structure
27 from mimesis.providers.text import Text
28 from mimesis.providers.transport import Transport
29 from mimesis.providers.units import UnitSystem
30
31 __all__ = ['Generic']
32
33
34 class Generic(BaseDataProvider):
35 """Class which contain all providers at one."""
36
37 def __init__(self, *args, **kwargs) -> None:
38 """Initialize attributes lazily.
39
40 :param args: Arguments.
41 :param kwargs: Keyword arguments.
42 """
43 super().__init__(*args, **kwargs)
44 self._person = Person
45 self._address = Address
46 self._datetime = Datetime
47 self._business = Business
48 self._text = Text
49 self._food = Food
50 self._science = Science
51 self.transport = Transport(seed=self.seed)
52 self.code = Code(seed=self.seed)
53 self.unit_system = UnitSystem(seed=self.seed)
54 self.file = File(seed=self.seed)
55 self.numbers = Numbers(seed=self.seed)
56 self.development = Development(seed=self.seed)
57 self.hardware = Hardware(seed=self.seed)
58 self.clothing = Clothing(seed=self.seed)
59 self.internet = Internet(seed=self.seed)
60 self.path = Path(seed=self.seed)
61 self.payment = Payment(seed=self.seed)
62 self.cryptographic = Cryptographic(seed=self.seed)
63 self.structure = Structure(seed=self.seed)
64 self.choice = Choice(seed=self.seed)
65
66 class Meta:
67 """Class for metadata."""
68
69 name = 'generic'
70
71 def __getattr__(self, attrname: str) -> Any:
72 """Get attribute without underscore.
73
74 :param attrname: Attribute name.
75 :return: An attribute.
76 """
77 attribute = object.__getattribute__(
78 self, '_' + attrname)
79 if attribute and callable(attribute):
80 self.__dict__[attrname] = attribute(
81 self.locale,
82 self.seed,
83 )
84 return self.__dict__[attrname]
85
86 def __dir__(self) -> List[str]:
87 """Available data providers.
88
89 The list of result will be used in AbstractField to
90 determine method's class.
91
92 :return: List of attributes.
93 """
94 attributes = []
95 exclude = BaseDataProvider().__dict__.keys()
96
97 for a in self.__dict__:
98 if a not in exclude:
99 if a.startswith('_'):
100 attribute = a.replace('_', '', 1)
101 attributes.append(attribute)
102 else:
103 attributes.append(a)
104 return attributes
105
106 def add_provider(self, cls: Type[BaseProvider]) -> None:
107 """Add a custom provider to Generic() object.
108
109 :param cls: Custom provider.
110 :return: None
111 :raises TypeError: if cls is not class.
112 """
113 if inspect.isclass(cls):
114 if not issubclass(cls, BaseProvider):
115 raise TypeError('The provider must be a '
116 'subclass of BaseProvider')
117 try:
118 meta = getattr(cls, 'Meta')
119 name = getattr(meta, 'name')
120 except AttributeError:
121 name = cls.__name__.lower()
122 setattr(self, name, cls(seed=self.seed))
123 else:
124 raise TypeError('The provider must be a class')
125
126 def add_providers(self, *providers: Type[BaseProvider]) -> None:
127 """Add a lot of custom providers to Generic() object.
128
129 :param providers: Custom providers.
130 :return: None
131 """
132 for provider in providers:
133 self.add_provider(provider)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mimesis/providers/generic.py b/mimesis/providers/generic.py
--- a/mimesis/providers/generic.py
+++ b/mimesis/providers/generic.py
@@ -5,6 +5,17 @@
import inspect
from typing import Any, List, Type
+from mimesis.builtins import (
+ BrazilSpecProvider,
+ DenmarkSpecProvider,
+ GermanySpecProvider,
+ ItalySpecProvider,
+ NetherlandsSpecProvider,
+ PolandSpecProvider,
+ RussiaSpecProvider,
+ UkraineSpecProvider,
+ USASpecProvider,
+)
from mimesis.providers.address import Address
from mimesis.providers.base import BaseDataProvider, BaseProvider
from mimesis.providers.business import Business
@@ -48,6 +59,21 @@
self._text = Text
self._food = Food
self._science = Science
+
+ _spec_providers = {
+ 'de': DenmarkSpecProvider,
+ 'ge': GermanySpecProvider,
+ 'en': USASpecProvider,
+ 'it': ItalySpecProvider,
+ 'nl': NetherlandsSpecProvider,
+ 'pl': PolandSpecProvider,
+ 'pt-br': BrazilSpecProvider,
+ 'ru': RussiaSpecProvider,
+ 'uk': UkraineSpecProvider,
+ }
+ if self.locale in _spec_providers:
+ self.add_provider(_spec_providers[self.locale])
+
self.transport = Transport(seed=self.seed)
self.code = Code(seed=self.seed)
self.unit_system = UnitSystem(seed=self.seed)
@@ -108,7 +134,8 @@
:param cls: Custom provider.
:return: None
- :raises TypeError: if cls is not class.
+ :raises TypeError: if cls is not class or is not a subclass
+ of BaseProvider.
"""
if inspect.isclass(cls):
if not issubclass(cls, BaseProvider):
|
{"golden_diff": "diff --git a/mimesis/providers/generic.py b/mimesis/providers/generic.py\n--- a/mimesis/providers/generic.py\n+++ b/mimesis/providers/generic.py\n@@ -5,6 +5,17 @@\n import inspect\n from typing import Any, List, Type\n \n+from mimesis.builtins import (\n+ BrazilSpecProvider,\n+ DenmarkSpecProvider,\n+ GermanySpecProvider,\n+ ItalySpecProvider,\n+ NetherlandsSpecProvider,\n+ PolandSpecProvider,\n+ RussiaSpecProvider,\n+ UkraineSpecProvider,\n+ USASpecProvider,\n+)\n from mimesis.providers.address import Address\n from mimesis.providers.base import BaseDataProvider, BaseProvider\n from mimesis.providers.business import Business\n@@ -48,6 +59,21 @@\n self._text = Text\n self._food = Food\n self._science = Science\n+\n+ _spec_providers = {\n+ 'de': DenmarkSpecProvider,\n+ 'ge': GermanySpecProvider,\n+ 'en': USASpecProvider,\n+ 'it': ItalySpecProvider,\n+ 'nl': NetherlandsSpecProvider,\n+ 'pl': PolandSpecProvider,\n+ 'pt-br': BrazilSpecProvider,\n+ 'ru': RussiaSpecProvider,\n+ 'uk': UkraineSpecProvider,\n+ }\n+ if self.locale in _spec_providers:\n+ self.add_provider(_spec_providers[self.locale])\n+\n self.transport = Transport(seed=self.seed)\n self.code = Code(seed=self.seed)\n self.unit_system = UnitSystem(seed=self.seed)\n@@ -108,7 +134,8 @@\n \n :param cls: Custom provider.\n :return: None\n- :raises TypeError: if cls is not class.\n+ :raises TypeError: if cls is not class or is not a subclass\n+ of BaseProvider.\n \"\"\"\n if inspect.isclass(cls):\n if not issubclass(cls, BaseProvider):\n", "issue": "Auto add builtin provider to Generic based on passed locale\n# Feature request\r\n\r\nAn idea is very simple:\r\n\r\n```python\r\ngeneric = Generic('ru', auto_add_builtin=True)\r\ngeneric.russia_provider.inn()\r\n```\r\n\r\nInstead of this:\r\n\r\n```python\r\nfrom mimesis import Generic\r\nfrom mimesis.builtins import RussiaSpecProvider\r\n\r\ngeneric = Generic('ru')\r\ngeneric.add_provider(RussiaSpecProvider)\r\ngeneric.russia_provider.inn()\r\n```\r\n\r\nOptionally we can make builtin's name customizable: \r\n\r\n```python\r\ngeneric = Generic('ru', auto_add_builtin=True, builtin_custom_name='russia')\r\ngeneric.russia.inn()\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides all at one.\"\"\"\n\nimport inspect\nfrom typing import Any, List, Type\n\nfrom mimesis.providers.address import Address\nfrom mimesis.providers.base import BaseDataProvider, BaseProvider\nfrom mimesis.providers.business import Business\nfrom mimesis.providers.choice import Choice\nfrom mimesis.providers.clothing import Clothing\nfrom mimesis.providers.code import Code\nfrom mimesis.providers.cryptographic import Cryptographic\nfrom mimesis.providers.date import Datetime\nfrom mimesis.providers.development import Development\nfrom mimesis.providers.file import File\nfrom mimesis.providers.food import Food\nfrom mimesis.providers.hardware import Hardware\nfrom mimesis.providers.internet import Internet\nfrom mimesis.providers.numbers import Numbers\nfrom mimesis.providers.path import Path\nfrom mimesis.providers.payment import Payment\nfrom mimesis.providers.person import Person\nfrom mimesis.providers.science import Science\nfrom mimesis.providers.structure import Structure\nfrom mimesis.providers.text import Text\nfrom mimesis.providers.transport import Transport\nfrom mimesis.providers.units import UnitSystem\n\n__all__ = ['Generic']\n\n\nclass Generic(BaseDataProvider):\n \"\"\"Class which contain all providers at one.\"\"\"\n\n def __init__(self, *args, 
**kwargs) -> None:\n \"\"\"Initialize attributes lazily.\n\n :param args: Arguments.\n :param kwargs: Keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._person = Person\n self._address = Address\n self._datetime = Datetime\n self._business = Business\n self._text = Text\n self._food = Food\n self._science = Science\n self.transport = Transport(seed=self.seed)\n self.code = Code(seed=self.seed)\n self.unit_system = UnitSystem(seed=self.seed)\n self.file = File(seed=self.seed)\n self.numbers = Numbers(seed=self.seed)\n self.development = Development(seed=self.seed)\n self.hardware = Hardware(seed=self.seed)\n self.clothing = Clothing(seed=self.seed)\n self.internet = Internet(seed=self.seed)\n self.path = Path(seed=self.seed)\n self.payment = Payment(seed=self.seed)\n self.cryptographic = Cryptographic(seed=self.seed)\n self.structure = Structure(seed=self.seed)\n self.choice = Choice(seed=self.seed)\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'generic'\n\n def __getattr__(self, attrname: str) -> Any:\n \"\"\"Get attribute without underscore.\n\n :param attrname: Attribute name.\n :return: An attribute.\n \"\"\"\n attribute = object.__getattribute__(\n self, '_' + attrname)\n if attribute and callable(attribute):\n self.__dict__[attrname] = attribute(\n self.locale,\n self.seed,\n )\n return self.__dict__[attrname]\n\n def __dir__(self) -> List[str]:\n \"\"\"Available data providers.\n\n The list of result will be used in AbstractField to\n determine method's class.\n\n :return: List of attributes.\n \"\"\"\n attributes = []\n exclude = BaseDataProvider().__dict__.keys()\n\n for a in self.__dict__:\n if a not in exclude:\n if a.startswith('_'):\n attribute = a.replace('_', '', 1)\n attributes.append(attribute)\n else:\n attributes.append(a)\n return attributes\n\n def add_provider(self, cls: Type[BaseProvider]) -> None:\n \"\"\"Add a custom provider to Generic() object.\n\n :param cls: Custom provider.\n :return: None\n :raises TypeError: if cls is not class.\n \"\"\"\n if inspect.isclass(cls):\n if not issubclass(cls, BaseProvider):\n raise TypeError('The provider must be a '\n 'subclass of BaseProvider')\n try:\n meta = getattr(cls, 'Meta')\n name = getattr(meta, 'name')\n except AttributeError:\n name = cls.__name__.lower()\n setattr(self, name, cls(seed=self.seed))\n else:\n raise TypeError('The provider must be a class')\n\n def add_providers(self, *providers: Type[BaseProvider]) -> None:\n \"\"\"Add a lot of custom providers to Generic() object.\n\n :param providers: Custom providers.\n :return: None\n \"\"\"\n for provider in providers:\n self.add_provider(provider)\n", "path": "mimesis/providers/generic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides all at one.\"\"\"\n\nimport inspect\nfrom typing import Any, List, Type\n\nfrom mimesis.builtins import (\n BrazilSpecProvider,\n DenmarkSpecProvider,\n GermanySpecProvider,\n ItalySpecProvider,\n NetherlandsSpecProvider,\n PolandSpecProvider,\n RussiaSpecProvider,\n UkraineSpecProvider,\n USASpecProvider,\n)\nfrom mimesis.providers.address import Address\nfrom mimesis.providers.base import BaseDataProvider, BaseProvider\nfrom mimesis.providers.business import Business\nfrom mimesis.providers.choice import Choice\nfrom mimesis.providers.clothing import Clothing\nfrom mimesis.providers.code import Code\nfrom mimesis.providers.cryptographic import Cryptographic\nfrom mimesis.providers.date import Datetime\nfrom mimesis.providers.development import Development\nfrom 
mimesis.providers.file import File\nfrom mimesis.providers.food import Food\nfrom mimesis.providers.hardware import Hardware\nfrom mimesis.providers.internet import Internet\nfrom mimesis.providers.numbers import Numbers\nfrom mimesis.providers.path import Path\nfrom mimesis.providers.payment import Payment\nfrom mimesis.providers.person import Person\nfrom mimesis.providers.science import Science\nfrom mimesis.providers.structure import Structure\nfrom mimesis.providers.text import Text\nfrom mimesis.providers.transport import Transport\nfrom mimesis.providers.units import UnitSystem\n\n__all__ = ['Generic']\n\n\nclass Generic(BaseDataProvider):\n \"\"\"Class which contain all providers at one.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes lazily.\n\n :param args: Arguments.\n :param kwargs: Keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._person = Person\n self._address = Address\n self._datetime = Datetime\n self._business = Business\n self._text = Text\n self._food = Food\n self._science = Science\n\n _spec_providers = {\n 'de': DenmarkSpecProvider,\n 'ge': GermanySpecProvider,\n 'en': USASpecProvider,\n 'it': ItalySpecProvider,\n 'nl': NetherlandsSpecProvider,\n 'pl': PolandSpecProvider,\n 'pt-br': BrazilSpecProvider,\n 'ru': RussiaSpecProvider,\n 'uk': UkraineSpecProvider,\n }\n if self.locale in _spec_providers:\n self.add_provider(_spec_providers[self.locale])\n\n self.transport = Transport(seed=self.seed)\n self.code = Code(seed=self.seed)\n self.unit_system = UnitSystem(seed=self.seed)\n self.file = File(seed=self.seed)\n self.numbers = Numbers(seed=self.seed)\n self.development = Development(seed=self.seed)\n self.hardware = Hardware(seed=self.seed)\n self.clothing = Clothing(seed=self.seed)\n self.internet = Internet(seed=self.seed)\n self.path = Path(seed=self.seed)\n self.payment = Payment(seed=self.seed)\n self.cryptographic = Cryptographic(seed=self.seed)\n self.structure = Structure(seed=self.seed)\n self.choice = Choice(seed=self.seed)\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'generic'\n\n def __getattr__(self, attrname: str) -> Any:\n \"\"\"Get attribute without underscore.\n\n :param attrname: Attribute name.\n :return: An attribute.\n \"\"\"\n attribute = object.__getattribute__(\n self, '_' + attrname)\n if attribute and callable(attribute):\n self.__dict__[attrname] = attribute(\n self.locale,\n self.seed,\n )\n return self.__dict__[attrname]\n\n def __dir__(self) -> List[str]:\n \"\"\"Available data providers.\n\n The list of result will be used in AbstractField to\n determine method's class.\n\n :return: List of attributes.\n \"\"\"\n attributes = []\n exclude = BaseDataProvider().__dict__.keys()\n\n for a in self.__dict__:\n if a not in exclude:\n if a.startswith('_'):\n attribute = a.replace('_', '', 1)\n attributes.append(attribute)\n else:\n attributes.append(a)\n return attributes\n\n def add_provider(self, cls: Type[BaseProvider]) -> None:\n \"\"\"Add a custom provider to Generic() object.\n\n :param cls: Custom provider.\n :return: None\n :raises TypeError: if cls is not class or is not a subclass\n of BaseProvider.\n \"\"\"\n if inspect.isclass(cls):\n if not issubclass(cls, BaseProvider):\n raise TypeError('The provider must be a '\n 'subclass of BaseProvider')\n try:\n meta = getattr(cls, 'Meta')\n name = getattr(meta, 'name')\n except AttributeError:\n name = cls.__name__.lower()\n setattr(self, name, cls(seed=self.seed))\n else:\n raise TypeError('The provider must be a 
class')\n\n def add_providers(self, *providers: Type[BaseProvider]) -> None:\n \"\"\"Add a lot of custom providers to Generic() object.\n\n :param providers: Custom providers.\n :return: None\n \"\"\"\n for provider in providers:\n self.add_provider(provider)\n", "path": "mimesis/providers/generic.py"}]}
| 1,629 | 426 |
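For reference, the golden diff in the record above wires a locale-to-builtin map into `Generic.__init__`. A minimal usage sketch, assuming mimesis is installed and that `RussiaSpecProvider.Meta.name` resolves to `russia_provider`, as the issue text itself suggests:

```python
from mimesis import Generic

# With the patch applied, a supported locale auto-registers its
# country-specific builtin provider; no manual add_provider() call.
generic = Generic('ru')

# The provider is reachable under its Meta.name attribute.
print(generic.russia_provider.inn())  # a generated Russian INN

# Locales absent from the mapping (e.g. 'fi') simply get no extra provider.
```

Because the mapping goes through the existing `add_provider` path, unsupported locales behave exactly as before.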
gh_patches_debug_10035
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-1433
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Helptext for supported file formats is not up-to-date
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/cases/forms.py`
Content:
```
1 from typing import List
2
3 from crispy_forms.helper import FormHelper
4 from crispy_forms.layout import Submit
5 from django import forms
6 from django.conf import settings
7 from django.core.exceptions import ValidationError
8
9 from grandchallenge.cases.models import RawImageFile, RawImageUploadSession
10 from grandchallenge.jqfileupload.widgets import uploader
11 from grandchallenge.jqfileupload.widgets.uploader import (
12 StagedAjaxFile,
13 UploadedAjaxFileList,
14 )
15
16
17 class UploadRawImagesForm(forms.ModelForm):
18 files = UploadedAjaxFileList(
19 widget=uploader.AjaxUploadWidget(multifile=True, auto_commit=False),
20 label="Image files",
21 help_text=(
22 "The total size of all files uploaded in a single session "
23 "cannot exceed 10 GB.<br>"
24 "The following file formats are supported: "
25 ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg."
26 ),
27 )
28
29 def __init__(self, *args, user, linked_task=None, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.helper = FormHelper()
32 self.helper.add_input(Submit("save", "Submit"))
33 self.fields["files"].widget.user = user
34 self._linked_task = linked_task
35
36 def clean_files(self):
37 files = self.cleaned_data["files"]
38
39 if len({f.name for f in files}) != len(files):
40 raise ValidationError("Filenames must be unique.")
41
42 if sum([f.size for f in files]) > settings.UPLOAD_SESSION_MAX_BYTES:
43 raise ValidationError(
44 "Total size of all files exceeds the upload limit."
45 )
46
47 return files
48
49 def save(self, commit=True):
50 instance = super().save(commit=False) # type: RawImageUploadSession
51
52 # Create links between the created session and all uploaded files
53 uploaded_files = self.cleaned_data[
54 "files"
55 ] # type: List[StagedAjaxFile]
56
57 raw_files = [
58 RawImageFile(
59 upload_session=instance,
60 filename=uploaded_file.name,
61 staged_file_id=uploaded_file.uuid,
62 )
63 for uploaded_file in uploaded_files
64 ]
65
66 if commit:
67 instance.save()
68 RawImageFile.objects.bulk_create(raw_files)
69 instance.process_images(linked_task=self._linked_task)
70
71 return instance
72
73 class Meta:
74 model = RawImageUploadSession
75 fields = ["files"]
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py
--- a/app/grandchallenge/cases/forms.py
+++ b/app/grandchallenge/cases/forms.py
@@ -22,7 +22,10 @@
"The total size of all files uploaded in a single session "
"cannot exceed 10 GB.<br>"
"The following file formats are supported: "
- ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg."
+ ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.<br>"
+ "The following file formats can be uploaded and will be converted to "
+ "tif: Aperio(.svs), Hamamatsu(.vms, .vmu, .ndpi), Leica(.scn), MIRAX"
+ "(.mrxs) and Ventana(.bif)."
),
)
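A quick regression check for the wording change could look like the hypothetical pytest sketch below; the import path comes from the file in the record, while the test name and format list are assumptions drawn from the diff:

```python
from grandchallenge.cases.forms import UploadRawImagesForm

def test_help_text_lists_convertible_slide_formats():
    # The declared field keeps its help_text on the form class itself,
    # so no form instance (and no user kwarg) is needed.
    help_text = UploadRawImagesForm.base_fields["files"].help_text
    for ext in (".svs", ".ndpi", ".scn", ".mrxs", ".bif"):
        assert ext in help_text
```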
|
{"golden_diff": "diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py\n--- a/app/grandchallenge/cases/forms.py\n+++ b/app/grandchallenge/cases/forms.py\n@@ -22,7 +22,10 @@\n \"The total size of all files uploaded in a single session \"\n \"cannot exceed 10 GB.<br>\"\n \"The following file formats are supported: \"\n- \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.\"\n+ \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.<br>\"\n+ \"The following file formats can be uploaded and will be converted to \"\n+ \"tif: Aperio(.svs), Hamamatsu(.vms, .vmu, .ndpi), Leica(.scn), MIRAX\"\n+ \"(.mrxs) and Ventana(.bif).\"\n ),\n )\n", "issue": "Helptext for supported file formats is not up-to-date\n\n", "before_files": [{"content": "from typing import List\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\n\nfrom grandchallenge.cases.models import RawImageFile, RawImageUploadSession\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import (\n StagedAjaxFile,\n UploadedAjaxFileList,\n)\n\n\nclass UploadRawImagesForm(forms.ModelForm):\n files = UploadedAjaxFileList(\n widget=uploader.AjaxUploadWidget(multifile=True, auto_commit=False),\n label=\"Image files\",\n help_text=(\n \"The total size of all files uploaded in a single session \"\n \"cannot exceed 10 GB.<br>\"\n \"The following file formats are supported: \"\n \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.\"\n ),\n )\n\n def __init__(self, *args, user, linked_task=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit(\"save\", \"Submit\"))\n self.fields[\"files\"].widget.user = user\n self._linked_task = linked_task\n\n def clean_files(self):\n files = self.cleaned_data[\"files\"]\n\n if len({f.name for f in files}) != len(files):\n raise ValidationError(\"Filenames must be unique.\")\n\n if sum([f.size for f in files]) > settings.UPLOAD_SESSION_MAX_BYTES:\n raise ValidationError(\n \"Total size of all files exceeds the upload limit.\"\n )\n\n return files\n\n def save(self, commit=True):\n instance = super().save(commit=False) # type: RawImageUploadSession\n\n # Create links between the created session and all uploaded files\n uploaded_files = self.cleaned_data[\n \"files\"\n ] # type: List[StagedAjaxFile]\n\n raw_files = [\n RawImageFile(\n upload_session=instance,\n filename=uploaded_file.name,\n staged_file_id=uploaded_file.uuid,\n )\n for uploaded_file in uploaded_files\n ]\n\n if commit:\n instance.save()\n RawImageFile.objects.bulk_create(raw_files)\n instance.process_images(linked_task=self._linked_task)\n\n return instance\n\n class Meta:\n model = RawImageUploadSession\n fields = [\"files\"]\n", "path": "app/grandchallenge/cases/forms.py"}], "after_files": [{"content": "from typing import List\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\n\nfrom grandchallenge.cases.models import RawImageFile, RawImageUploadSession\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import (\n StagedAjaxFile,\n UploadedAjaxFileList,\n)\n\n\nclass UploadRawImagesForm(forms.ModelForm):\n files = UploadedAjaxFileList(\n 
widget=uploader.AjaxUploadWidget(multifile=True, auto_commit=False),\n label=\"Image files\",\n help_text=(\n \"The total size of all files uploaded in a single session \"\n \"cannot exceed 10 GB.<br>\"\n \"The following file formats are supported: \"\n \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.<br>\"\n \"The following file formats can be uploaded and will be converted to \"\n \"tif: Aperio(.svs), Hamamatsu(.vms, .vmu, .ndpi), Leica(.scn), MIRAX\"\n \"(.mrxs) and Ventana(.bif).\"\n ),\n )\n\n def __init__(self, *args, user, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit(\"save\", \"Submit\"))\n self.fields[\"files\"].widget.user = user\n\n def clean_files(self):\n files = self.cleaned_data[\"files\"]\n\n if len({f.name for f in files}) != len(files):\n raise ValidationError(\"Filenames must be unique.\")\n\n if sum([f.size for f in files]) > settings.UPLOAD_SESSION_MAX_BYTES:\n raise ValidationError(\n \"Total size of all files exceeds the upload limit.\"\n )\n\n return files\n\n def save(self, commit=True):\n instance = super().save(commit=False) # type: RawImageUploadSession\n\n # Create links between the created session and all uploaded files\n uploaded_files = self.cleaned_data[\n \"files\"\n ] # type: List[StagedAjaxFile]\n\n raw_files = [\n RawImageFile(\n upload_session=instance,\n filename=uploaded_file.name,\n staged_file_id=uploaded_file.uuid,\n )\n for uploaded_file in uploaded_files\n ]\n\n if commit:\n instance.save()\n RawImageFile.objects.bulk_create(raw_files)\n instance.process_images()\n\n return instance\n\n class Meta:\n model = RawImageUploadSession\n fields = [\"files\"]\n", "path": "app/grandchallenge/cases/forms.py"}]}
| 949 | 235 |
gh_patches_debug_58053
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3312
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider upsstore is broken
During the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/upsstore.py`
Content:
```
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7 DAY_MAPPING = {
8 "MONDAY": "Mo",
9 "TUESDAY": "Tu",
10 "WEDNESDAY": "We",
11 "THURSDAY": "Th",
12 "FRIDAY": "Fr",
13 "SATURDAY": "Sa",
14 "SUNDAY": "Su"
15 }
16
17
18 class UpsStoreSpider(scrapy.Spider):
19 name = "upsstore"
20 item_attributes = { 'brand': "UPS Store" }
21 allowed_domains = ["theupsstore.com"]
22 download_delay = 0.1
23 start_urls = (
24 'https://locations.theupsstore.com/',
25 )
26
27 def parse_hours(self, hours):
28 """
29 :param hours:
30 :return:
31 """
32 hours = json.loads(hours)
33 o = OpeningHours()
34
35 for day in hours["hours"]["days"]:
36 if not day["isClosed"]:
37 interval = day["intervals"][0]
38
39 o.add_range(DAY_MAPPING[day["day"]],
40 open_time=str(interval["start"]),
41 close_time=str(interval["end"]),
42 time_format="%H%M")
43 return o.as_opening_hours()
44
45 def parse_store(self, response):
46 ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
47 if not ref:
48 ref = re.search(r'store(\d+)@theupsstore.com',
49 response.xpath('//a[@itemprop="email"]/text()').extract_first()).groups()
50
51 properties = {
52 'name': response.xpath('//span[@class="LocationName-geo"]/text()').extract_first(),
53 'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
54 'addr_full': response.xpath('//meta[@itemprop="streetAddress"]/@content').extract_first(),
55 'city': response.xpath('//meta[@itemprop="addressLocality"]/@content').extract_first(),
56 'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
57 'country': response.xpath('//abbr[@itemprop="addressCountry"]/text()').extract_first(),
58 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
59 'ref': ref,
60 'website': response.url,
61 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
62 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
63 }
64
65 hours = response.xpath('//script[@id="location_info_hours"]/text()').extract_first()
66 try:
67 hours = self.parse_hours(hours)
68 if hours:
69 properties['opening_hours'] = hours
70 except:
71 pass
72
73 yield GeojsonPointItem(**properties)
74
75 def parse(self, response):
76 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
77
78 if urls:
79 for url in urls:
80 if len(url.split('/')) == 3:
81 callback = self.parse_store
82 else:
83 callback = self.parse
84
85 yield scrapy.Request(
86 response.urljoin(url),
87 callback=callback,
88 )
89
90 else:
91 urls = response.xpath('//a[@class="Link"]/@href').extract()
92 for url in urls:
93 yield scrapy.Request(
94 response.urljoin(url),
95 callback=self.parse_store,
96 )
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py
--- a/locations/spiders/upsstore.py
+++ b/locations/spiders/upsstore.py
@@ -43,6 +43,9 @@
return o.as_opening_hours()
 
def parse_store(self, response):
+ if "Permanently Closed" in response.text:
+ return
+
ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
if not ref:
ref = re.search(r'store(\d+)@theupsstore.com',
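The guard above returns before any XPath lookups run, so a closed-store page yields no item instead of raising on missing fields. A rough illustration with a hypothetical stub response (not a real scrapy object):

```python
from locations.spiders.upsstore import UpsStoreSpider

class FakeResponse:
    """Stand-in exposing only the .text attribute the new check reads."""
    def __init__(self, text):
        self.text = text

spider = UpsStoreSpider()
page = FakeResponse("<html>This location is Permanently Closed</html>")

# parse_store is a generator; the early return means it yields nothing.
assert list(spider.parse_store(page)) == []
```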
|
{"golden_diff": "diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py\n--- a/locations/spiders/upsstore.py\n+++ b/locations/spiders/upsstore.py\n@@ -43,6 +43,9 @@\n return o.as_opening_hours()\n \n def parse_store(self, response):\n+ if \"Permanently Closed\" in response.text:\n+ return\n+\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n", "issue": "Spider upsstore is broken\nDuring the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass UpsStoreSpider(scrapy.Spider):\n name = \"upsstore\"\n item_attributes = { 'brand': \"UPS Store\" }\n allowed_domains = [\"theupsstore.com\"]\n download_delay = 0.1\n start_urls = (\n 'https://locations.theupsstore.com/',\n )\n\n def parse_hours(self, hours):\n \"\"\"\n :param hours:\n :return:\n \"\"\"\n hours = json.loads(hours)\n o = OpeningHours()\n\n for day in hours[\"hours\"][\"days\"]:\n if not day[\"isClosed\"]:\n interval = day[\"intervals\"][0]\n\n o.add_range(DAY_MAPPING[day[\"day\"]],\n open_time=str(interval[\"start\"]),\n close_time=str(interval[\"end\"]),\n time_format=\"%H%M\")\n return o.as_opening_hours()\n\n def parse_store(self, response):\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n response.xpath('//a[@itemprop=\"email\"]/text()').extract_first()).groups()\n\n properties = {\n 'name': response.xpath('//span[@class=\"LocationName-geo\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first(),\n 'addr_full': response.xpath('//meta[@itemprop=\"streetAddress\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@itemprop=\"addressLocality\"]/@content').extract_first(),\n 'state': response.xpath('//abbr[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'country': response.xpath('//abbr[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n\n hours = response.xpath('//script[@id=\"location_info_hours\"]/text()').extract_first()\n try:\n hours = self.parse_hours(hours)\n if hours:\n properties['opening_hours'] = hours\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n\n if urls:\n for url in urls:\n if len(url.split('/')) == 3:\n callback = self.parse_store\n else:\n callback = self.parse\n\n yield 
scrapy.Request(\n response.urljoin(url),\n callback=callback,\n )\n\n else:\n urls = response.xpath('//a[@class=\"Link\"]/@href').extract()\n for url in urls:\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )", "path": "locations/spiders/upsstore.py"}], "after_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass UpsStoreSpider(scrapy.Spider):\n name = \"upsstore\"\n item_attributes = { 'brand': \"UPS Store\" }\n allowed_domains = [\"theupsstore.com\"]\n download_delay = 0.1\n start_urls = (\n 'https://locations.theupsstore.com/',\n )\n\n def parse_hours(self, hours):\n \"\"\"\n :param hours:\n :return:\n \"\"\"\n hours = json.loads(hours)\n o = OpeningHours()\n\n for day in hours[\"hours\"][\"days\"]:\n if not day[\"isClosed\"]:\n interval = day[\"intervals\"][0]\n\n o.add_range(DAY_MAPPING[day[\"day\"]],\n open_time=str(interval[\"start\"]),\n close_time=str(interval[\"end\"]),\n time_format=\"%H%M\")\n return o.as_opening_hours()\n\n def parse_store(self, response):\n if \"Permanently Closed\" in response.text:\n return\n\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n response.xpath('//a[@itemprop=\"email\"]/text()').extract_first()).groups()\n\n properties = {\n 'name': response.xpath('//span[@class=\"LocationName-geo\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first(),\n 'addr_full': response.xpath('//meta[@itemprop=\"streetAddress\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@itemprop=\"addressLocality\"]/@content').extract_first(),\n 'state': response.xpath('//abbr[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'country': response.xpath('//abbr[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n\n hours = response.xpath('//script[@id=\"location_info_hours\"]/text()').extract_first()\n try:\n hours = self.parse_hours(hours)\n if hours:\n properties['opening_hours'] = hours\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n\n if urls:\n for url in urls:\n if len(url.split('/')) == 3:\n callback = self.parse_store\n else:\n callback = self.parse\n\n yield scrapy.Request(\n response.urljoin(url),\n callback=callback,\n )\n\n else:\n urls = response.xpath('//a[@class=\"Link\"]/@href').extract()\n for url in urls:\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )", "path": "locations/spiders/upsstore.py"}]}
| 1,371 | 133 |
gh_patches_debug_309
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-195
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix documentation main page's header
The header is gone:
<img width="1032" alt="2018-10-03 0 18 01" src="https://user-images.githubusercontent.com/4660275/46377643-d0ce1080-c6a1-11e8-950b-d2d0c515dee1.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/numbers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import Optional
5
6 from wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST
7 from wemake_python_styleguide.violations.best_practices import (
8 MagicNumberViolation,
9 )
10 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
11
12
13 class MagicNumberVisitor(BaseNodeVisitor):
14 """Checks magic numbers used in the code."""
15
16 _ALLOWED_PARENTS = (
17 ast.Assign,
18
19 # Constructor usages:
20 ast.FunctionDef,
21 ast.arguments,
22
23 # Primitives:
24 ast.List,
25 ast.Dict,
26 ast.Set,
27 ast.Tuple,
28 )
29
30 _PROXY_PARENTS = (
31 ast.UnaryOp,
32 )
33
34 def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:
35 """
36 Returns real number's parent.
37
38 What can go wrong?
39
40 1. Number can be negative: ``x = -1``,
41 so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``
42
43 """
44 parent = getattr(node, 'parent', None)
45 if isinstance(parent, self._PROXY_PARENTS):
46 return self._get_real_parent(parent)
47 return parent
48
49 def _check_is_magic(self, node: ast.Num) -> None:
50 parent = self._get_real_parent(node)
51 if isinstance(parent, self._ALLOWED_PARENTS):
52 return
53
54 if node.n in MAGIC_NUMBERS_WHITELIST:
55 return
56
57 if isinstance(node.n, int) and node.n <= 10:
58 return
59
60 self.add_violation(MagicNumberViolation(node, text=str(node.n)))
61
62 def visit_Num(self, node: ast.Num) -> None:
63 """
64 Checks numbers not to be magic constants inside the code.
65
66 Raises:
67 MagicNumberViolation
68
69 """
70 self._check_is_magic(node)
71 self.generic_visit(node)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -27,6 +27,7 @@
ast.Tuple,
)
 
+ # TODO: make consistent naming rules for class attributes:
_PROXY_PARENTS = (
ast.UnaryOp,
)
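For context on the `_PROXY_PARENTS` tuple the new comment annotates: negative literals hide their real parent behind a `UnaryOp` node, which is what `_get_real_parent` unwraps. A stdlib-only illustration of that AST shape:

```python
import ast

# "x = -42" parses as Assign(value=UnaryOp(op=USub, operand=Constant(42))),
# so the number's immediate parent is the UnaryOp proxy, not the Assign.
tree = ast.parse("x = -42")
unary = tree.body[0].value

print(type(unary).__name__)          # UnaryOp
print(type(unary.operand).__name__)  # Constant (ast.Num on older Pythons)
```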
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -27,6 +27,7 @@\n ast.Tuple,\n )\n \n+ # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n", "issue": "Fix documentation main page's header\nThe header is gone:\r\n<img width=\"1032\" alt=\"2018-10-03 0 18 01\" src=\"https://user-images.githubusercontent.com/4660275/46377643-d0ce1080-c6a1-11e8-950b-d2d0c515dee1.png\">\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. 
Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}]}
| 933 | 117 |
gh_patches_debug_32176
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-2288
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when number of qubits is of type numpy.int64
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
In `qiskit/validation/base.py`, function `check_types`: currently, if `n_qubits` or `memory_slots` are of type `numpy.int64`, then an error is triggered, because type `int` is expected.
I find it too strict. Especially considering that if the number of qubits is originated in a `numpy` array, then its default type is `numpy.int64`. Terra can allow additional types, or convert the type internally.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/register.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 Base register reference object.
17 """
18 import re
19 import logging
20 import itertools
21
22 from qiskit.exceptions import QiskitError, QiskitIndexError
23
24 logger = logging.getLogger(__name__)
25
26
27 class Register:
28 """Implement a generic register."""
29
30 # Counter for the number of instances in this class.
31 instances_counter = itertools.count()
32 # Prefix to use for auto naming.
33 prefix = 'reg'
34
35 def __init__(self, size, name=None):
36 """Create a new generic register.
37 """
38
39 if name is None:
40 name = '%s%i' % (self.prefix, next(self.instances_counter))
41
42 if not isinstance(name, str):
43 raise QiskitError("The circuit name should be a string "
44 "(or None for autogenerate a name).")
45
46 test = re.compile('[a-z][a-zA-Z0-9_]*')
47 if test.match(name) is None:
48 raise QiskitError("%s is an invalid OPENQASM register name." % name)
49
50 self.name = name
51 self.size = size
52 if size <= 0:
53 raise QiskitError("register size must be positive")
54
55 def __repr__(self):
56 """Return the official string representing the register."""
57 return "%s(%d, '%s')" % (self.__class__.__qualname__,
58 self.size, self.name)
59
60 def __len__(self):
61 """Return register size"""
62 return self.size
63
64 def check_range(self, j):
65 """Check that j is a valid index into self."""
66 if isinstance(j, int):
67 if j < 0 or j >= self.size:
68 raise QiskitIndexError("register index out of range")
69 elif isinstance(j, slice):
70 if j.start < 0 or j.stop >= self.size or (j.step is not None and
71 j.step <= 0):
72 raise QiskitIndexError("register index slice out of range")
73
74 def __getitem__(self, key):
75 """
76 Arg:
77 key (int|slice|list): index of the bit/qubit to be retrieved.
78
79 Returns:
80 tuple[Register, int]: a tuple in the form `(self, key)` if key is int.
81 If key is a slice, return a `list((self,key))`.
82
83 Raises:
84 QiskitError: if the `key` is not an integer.
85 QiskitIndexError: if the `key` is not in the range
86 `(0, self.size)`.
87 """
88 if not isinstance(key, (int, slice, list)):
89 raise QiskitError("expected integer or slice index into register")
90 if isinstance(key, int) and key < 0:
91 key = self.size + key
92 self.check_range(key)
93 if isinstance(key, slice):
94 return [(self, ind) for ind in range(*key.indices(len(self)))]
95 elif isinstance(key, list): # list of qubit indices
96 if max(key) < len(self):
97 return [(self, ind) for ind in key]
98 else:
99 raise QiskitError('register index out of range')
100 else:
101 return self, key
102
103 def __iter__(self):
104 """
105 Returns:
106 iterator: an iterator over the bits/qubits of the register, in the
107 form `tuple (Register, int)`.
108 """
109 return zip([self]*self.size, range(self.size))
110
111 def __eq__(self, other):
112 """Two Registers are the same if they are of the same type
113 (i.e. quantum/classical), and have the same name and size.
114
115 Args:
116 other (Register): other Register
117
118 Returns:
119 bool: are self and other equal.
120 """
121 res = False
122 if type(self) is type(other) and \
123 self.name == other.name and \
124 self.size == other.size:
125 res = True
126 return res
127
128 def __hash__(self):
129 """Make object hashable, based on the name and size to hash."""
130 return hash((type(self), self.name, self.size))
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py
--- a/qiskit/circuit/register.py
+++ b/qiskit/circuit/register.py
@@ -36,21 +36,29 @@
"""Create a new generic register.
"""
+ # validate (or cast) size
+ try:
+ size = int(size)
+ except Exception:
+ raise QiskitError("size needs to be castable to an int")
+ if size <= 0:
+ raise QiskitError("register size must be positive")
+
+ # validate (or cast) name
if name is None:
name = '%s%i' % (self.prefix, next(self.instances_counter))
-
- if not isinstance(name, str):
- raise QiskitError("The circuit name should be a string "
- "(or None for autogenerate a name).")
-
- test = re.compile('[a-z][a-zA-Z0-9_]*')
- if test.match(name) is None:
- raise QiskitError("%s is an invalid OPENQASM register name." % name)
+ else:
+ try:
+ name = str(name)
+ except Exception:
+ raise QiskitError("The circuit name should be castable to a string "
+ "(or None for autogenerate a name).")
+ name_format = re.compile('[a-z][a-zA-Z0-9_]*')
+ if name_format.match(name) is None:
+ raise QiskitError("%s is an invalid OPENQASM register name." % name)
 
self.name = name
self.size = size
- if size <= 0:
- raise QiskitError("register size must be positive")
 
def __repr__(self):
"""Return the official string representing the register."""
@@ -106,7 +114,7 @@
iterator: an iterator over the bits/qubits of the register, in the
form `tuple (Register, int)`.
"""
- return zip([self]*self.size, range(self.size))
+ return zip([self] * self.size, range(self.size))
 
def __eq__(self, other):
"""Two Registers are the same if they are of the same type
|
{"golden_diff": "diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py\n--- a/qiskit/circuit/register.py\n+++ b/qiskit/circuit/register.py\n@@ -36,21 +36,29 @@\n \"\"\"Create a new generic register.\n \"\"\"\n \n+ # validate (or cast) size\n+ try:\n+ size = int(size)\n+ except Exception:\n+ raise QiskitError(\"size needs to be castable to an int\")\n+ if size <= 0:\n+ raise QiskitError(\"register size must be positive\")\n+\n+ # validate (or cast) name\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n-\n- if not isinstance(name, str):\n- raise QiskitError(\"The circuit name should be a string \"\n- \"(or None for autogenerate a name).\")\n-\n- test = re.compile('[a-z][a-zA-Z0-9_]*')\n- if test.match(name) is None:\n- raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n+ else:\n+ try:\n+ name = str(name)\n+ except Exception:\n+ raise QiskitError(\"The circuit name should be castable to a string \"\n+ \"(or None for autogenerate a name).\")\n+ name_format = re.compile('[a-z][a-zA-Z0-9_]*')\n+ if name_format.match(name) is None:\n+ raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n \n self.name = name\n self.size = size\n- if size <= 0:\n- raise QiskitError(\"register size must be positive\")\n \n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n@@ -106,7 +114,7 @@\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n- return zip([self]*self.size, range(self.size))\n+ return zip([self] * self.size, range(self.size))\n \n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n", "issue": "Error when number of qubits is of type numpy.int64\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nIn `qiskit/validation/base.py`, function `check_types`: currently, if `n_qubits` or `memory_slots` are of type `numpy.int64`, then an error is triggered, because type `int` is expected.\r\n\r\nI find it too strict. Especially considering that if the number of qubits is originated in a `numpy` array, then its default type is `numpy.int64`. Terra can allow additional types, or convert the type internally.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n\n if not isinstance(name, str):\n raise QiskitError(\"The circuit name should be a string \"\n \"(or None for autogenerate a name).\")\n\n test = re.compile('[a-z][a-zA-Z0-9_]*')\n if test.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n if isinstance(key, int) and key < 0:\n key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise QiskitError('register index out of range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self]*self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. 
quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n # validate (or cast) size\n try:\n size = int(size)\n except Exception:\n raise QiskitError(\"size needs to be castable to an int\")\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n # validate (or cast) name\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n else:\n try:\n name = str(name)\n except Exception:\n raise QiskitError(\"The circuit name should be castable to a string \"\n \"(or None for autogenerate a name).\")\n name_format = re.compile('[a-z][a-zA-Z0-9_]*')\n if name_format.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n if isinstance(key, int) and key < 0:\n key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, 
list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise QiskitError('register index out of range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self] * self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}]}
| 1,723 | 509 |
gh_patches_debug_40866
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5273
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/kernel/triton/kvcache_copy.py`
Content:
```
1 import torch
2 import triton
3 import triton.language as tl
4
5
6 # Triton 2.1.0
7 @triton.jit
8 def _copy_to_kvcache_seqlen1_kernel(
9 KV, # K or V
10 KVCache, # KCache or VCache
11 BLOCK_TABLES,
12 context_lengths,
13 stride_kt,
14 stride_kh,
15 stride_kd,
16 stride_cacheb,
17 stride_cacheh,
18 stride_cached,
19 stride_cachebs,
20 stride_bts,
21 stride_btb,
22 block_size,
23 HEAD_DIM: tl.constexpr,
24 ):
25 cur_seq_idx = tl.program_id(0)
26 cur_kv_head_idx = tl.program_id(1)
27
28 cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)
29 last_bt_block_idx = cur_kv_seq_len // block_size
30 block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts
31 block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)
32 offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs
33 offsets_dmodel = tl.arange(0, HEAD_DIM)
34 offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd
35 kv = tl.load(KV + offsets_kv)
36 offsets_kvcache = (
37 block_id * stride_cacheb
38 + cur_kv_head_idx * stride_cacheh
39 + offsets_dmodel * stride_cached
40 + offsets_in_last_block
41 )
42 tl.store(KVCache + offsets_kvcache, kv)
43 return
44
45
46 # Used with blocked kv cache.
47 # Copy k or v to block k/v cache during decoding stage
48 def copy_kv_to_blocked_cache(
49 k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage
50 k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)
51 context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)
52 block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]
53 ):
54 assert k.dim() == 4, "Unsupported shape of k (supposed to be used for decoding stage)"
55 assert k.size(1) == 1, "Unsupported kv seq len (supposed to be used for decoding stage)"
56 assert k.size(-1) == k_cache.size(-2), "Incompatible head dim"
57 assert k.dtype == k_cache.dtype, "Expected consistent dtype for tensor and cache."
58 bsz, _, num_kv_heads, head_dim = k.shape
59 assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (
60 f"Got incompatible batch size (number of seqs):\n"
61 f" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, "
62 f"batch size {bsz}"
63 )
64
65 # Modify if the shape of kv cahce is changed.
66 block_size = k_cache.size(-1)
67 # [bsz, 1, num_kv_heads, head_dim] -> [bsz, num_kv_heads, head_dim]
68 k = k.squeeze(dim=1)
69
70 num_warps = 8 if head_dim > 128 else 4
71
72 grid = (bsz, num_kv_heads)
73 _copy_to_kvcache_seqlen1_kernel[grid](
74 k,
75 k_cache,
76 block_tables,
77 context_lengths,
78 k.stride(0),
79 k.stride(1),
80 k.stride(2),
81 k_cache.stride(0),
82 k_cache.stride(1),
83 k_cache.stride(2),
84 k_cache.stride(3),
85 block_tables.stride(0),
86 block_tables.stride(1),
87 block_size,
88 HEAD_DIM=head_dim,
89 num_warps=num_warps,
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/kernel/triton/kvcache_copy.py b/colossalai/kernel/triton/kvcache_copy.py
--- a/colossalai/kernel/triton/kvcache_copy.py
+++ b/colossalai/kernel/triton/kvcache_copy.py
@@ -25,11 +25,11 @@
cur_seq_idx = tl.program_id(0)
cur_kv_head_idx = tl.program_id(1)
 
- cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)
- last_bt_block_idx = cur_kv_seq_len // block_size
+ past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1
+ last_bt_block_idx = past_kv_seq_len // block_size
block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts
block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)
- offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs
+ offsets_in_last_block = (past_kv_seq_len % block_size) * stride_cachebs
offsets_dmodel = tl.arange(0, HEAD_DIM)
offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd
kv = tl.load(KV + offsets_kv)
@@ -43,23 +43,30 @@
return
 
 
-# Used with blocked kv cache.
-# Copy k or v to block k/v cache during decoding stage
def copy_kv_to_blocked_cache(
- k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage
- k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)
- context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)
- block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]
+ k: torch.Tensor,
+ k_cache: torch.Tensor,
+ kv_lengths: torch.Tensor,
+ block_tables: torch.Tensor,
):
+ """
+ Copy keys or values to the blocked key/value cache during decoding stage.
+
+ Parameters:
+ - k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1.
+ - k_cache (torch.Tensor): [num_blocks, num_kv_heads, head_dim, block_size] - Blocked key or value cache.
+ - kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence.
+ - block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence.
+ """
assert k.dim() == 4, "Unsupported shape of k (supposed to be used for decoding stage)"
assert k.size(1) == 1, "Unsupported kv seq len (supposed to be used for decoding stage)"
assert k.size(-1) == k_cache.size(-2), "Incompatible head dim"
assert k.dtype == k_cache.dtype, "Expected consistent dtype for tensor and cache."
bsz, _, num_kv_heads, head_dim = k.shape
- assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (
+ assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, (
f"Got incompatible batch size (number of seqs):\n"
- f" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, "
- f"batch size {bsz}"
+ f" Past kv sequence lengths bsz {kv_lengths.shape[0]}; "
+ f" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}"
)
 
# Modify if the shape of kv cahce is changed.
@@ -74,7 +81,7 @@
k,
k_cache,
block_tables,
- context_lengths,
+ kv_lengths,
k.stride(0),
k.stride(1),
k.stride(2),
|
{"golden_diff": "diff --git a/colossalai/kernel/triton/kvcache_copy.py b/colossalai/kernel/triton/kvcache_copy.py\n--- a/colossalai/kernel/triton/kvcache_copy.py\n+++ b/colossalai/kernel/triton/kvcache_copy.py\n@@ -25,11 +25,11 @@\n cur_seq_idx = tl.program_id(0)\n cur_kv_head_idx = tl.program_id(1)\n \n- cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)\n- last_bt_block_idx = cur_kv_seq_len // block_size\n+ past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1\n+ last_bt_block_idx = past_kv_seq_len // block_size\n block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts\n block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)\n- offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs\n+ offsets_in_last_block = (past_kv_seq_len % block_size) * stride_cachebs\n offsets_dmodel = tl.arange(0, HEAD_DIM)\n offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd\n kv = tl.load(KV + offsets_kv)\n@@ -43,23 +43,30 @@\n return\n \n \n-# Used with blocked kv cache.\n-# Copy k or v to block k/v cache during decoding stage\n def copy_kv_to_blocked_cache(\n- k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage\n- k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)\n- context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)\n- block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]\n+ k: torch.Tensor,\n+ k_cache: torch.Tensor,\n+ kv_lengths: torch.Tensor,\n+ block_tables: torch.Tensor,\n ):\n+ \"\"\"\n+ Copy keys or values to the blocked key/value cache during decoding stage.\n+\n+ Parameters:\n+ - k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1.\n+ - k_cache (torch.Tensor): [num_blocks, num_kv_heads, head_dim, block_size] - Blocked key or value cache.\n+ - kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence.\n+ - block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence.\n+ \"\"\"\n assert k.dim() == 4, \"Unsupported shape of k (supposed to be used for decoding stage)\"\n assert k.size(1) == 1, \"Unsupported kv seq len (supposed to be used for decoding stage)\"\n assert k.size(-1) == k_cache.size(-2), \"Incompatible head dim\"\n assert k.dtype == k_cache.dtype, \"Expected consistent dtype for tensor and cache.\"\n bsz, _, num_kv_heads, head_dim = k.shape\n- assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (\n+ assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, (\n f\"Got incompatible batch size (number of seqs):\\n\"\n- f\" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, \"\n- f\"batch size {bsz}\"\n+ f\" Past kv sequence lengths bsz {kv_lengths.shape[0]}; \"\n+ f\" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}\"\n )\n \n # Modify if the shape of kv cahce is changed.\n@@ -74,7 +81,7 @@\n k,\n k_cache,\n block_tables,\n- context_lengths,\n+ kv_lengths,\n k.stride(0),\n k.stride(1),\n k.stride(2),\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import torch\nimport triton\nimport triton.language as tl\n\n\n# Triton 2.1.0\[email protected]\ndef _copy_to_kvcache_seqlen1_kernel(\n KV, # K or V\n KVCache, # KCache or VCache\n BLOCK_TABLES,\n 
context_lengths,\n stride_kt,\n stride_kh,\n stride_kd,\n stride_cacheb,\n stride_cacheh,\n stride_cached,\n stride_cachebs,\n stride_bts,\n stride_btb,\n block_size,\n HEAD_DIM: tl.constexpr,\n):\n cur_seq_idx = tl.program_id(0)\n cur_kv_head_idx = tl.program_id(1)\n\n cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)\n last_bt_block_idx = cur_kv_seq_len // block_size\n block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts\n block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)\n offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs\n offsets_dmodel = tl.arange(0, HEAD_DIM)\n offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd\n kv = tl.load(KV + offsets_kv)\n offsets_kvcache = (\n block_id * stride_cacheb\n + cur_kv_head_idx * stride_cacheh\n + offsets_dmodel * stride_cached\n + offsets_in_last_block\n )\n tl.store(KVCache + offsets_kvcache, kv)\n return\n\n\n# Used with blocked kv cache.\n# Copy k or v to block k/v cache during decoding stage\ndef copy_kv_to_blocked_cache(\n k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage\n k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)\n context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)\n block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]\n):\n assert k.dim() == 4, \"Unsupported shape of k (supposed to be used for decoding stage)\"\n assert k.size(1) == 1, \"Unsupported kv seq len (supposed to be used for decoding stage)\"\n assert k.size(-1) == k_cache.size(-2), \"Incompatible head dim\"\n assert k.dtype == k_cache.dtype, \"Expected consistent dtype for tensor and cache.\"\n bsz, _, num_kv_heads, head_dim = k.shape\n assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (\n f\"Got incompatible batch size (number of seqs):\\n\"\n f\" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, \"\n f\"batch size {bsz}\"\n )\n\n # Modify if the shape of kv cahce is changed.\n block_size = k_cache.size(-1)\n # [bsz, 1, num_kv_heads, head_dim] -> [bsz, num_kv_heads, head_dim]\n k = k.squeeze(dim=1)\n\n num_warps = 8 if head_dim > 128 else 4\n\n grid = (bsz, num_kv_heads)\n _copy_to_kvcache_seqlen1_kernel[grid](\n k,\n k_cache,\n block_tables,\n context_lengths,\n k.stride(0),\n k.stride(1),\n k.stride(2),\n k_cache.stride(0),\n k_cache.stride(1),\n k_cache.stride(2),\n k_cache.stride(3),\n block_tables.stride(0),\n block_tables.stride(1),\n block_size,\n HEAD_DIM=head_dim,\n num_warps=num_warps,\n )\n", "path": "colossalai/kernel/triton/kvcache_copy.py"}], "after_files": [{"content": "import torch\nimport triton\nimport triton.language as tl\n\n\n# Triton 2.1.0\[email protected]\ndef _copy_to_kvcache_seqlen1_kernel(\n KV, # K or V\n KVCache, # KCache or VCache\n BLOCK_TABLES,\n context_lengths,\n stride_kt,\n stride_kh,\n stride_kd,\n stride_cacheb,\n stride_cacheh,\n stride_cached,\n stride_cachebs,\n stride_bts,\n stride_btb,\n block_size,\n HEAD_DIM: tl.constexpr,\n):\n cur_seq_idx = tl.program_id(0)\n cur_kv_head_idx = tl.program_id(1)\n\n past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1\n last_bt_block_idx = past_kv_seq_len // block_size\n block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts\n block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)\n offsets_in_last_block = (past_kv_seq_len % block_size) * 
stride_cachebs\n offsets_dmodel = tl.arange(0, HEAD_DIM)\n offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd\n kv = tl.load(KV + offsets_kv)\n offsets_kvcache = (\n block_id * stride_cacheb\n + cur_kv_head_idx * stride_cacheh\n + offsets_dmodel * stride_cached\n + offsets_in_last_block\n )\n tl.store(KVCache + offsets_kvcache, kv)\n return\n\n\ndef copy_kv_to_blocked_cache(\n k: torch.Tensor,\n k_cache: torch.Tensor,\n kv_lengths: torch.Tensor,\n block_tables: torch.Tensor,\n):\n \"\"\"\n Copy keys or values to the blocked key/value cache during decoding stage.\n\n Parameters:\n - k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1.\n - k_cache (torch.Tensor): [num_blocks, num_kv_heads, head_dim, block_size] - Blocked key or value cache.\n - kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence.\n - block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence.\n \"\"\"\n assert k.dim() == 4, \"Unsupported shape of k (supposed to be used for decoding stage)\"\n assert k.size(1) == 1, \"Unsupported kv seq len (supposed to be used for decoding stage)\"\n assert k.size(-1) == k_cache.size(-2), \"Incompatible head dim\"\n assert k.dtype == k_cache.dtype, \"Expected consistent dtype for tensor and cache.\"\n bsz, _, num_kv_heads, head_dim = k.shape\n assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, (\n f\"Got incompatible batch size (number of seqs):\\n\"\n f\" Past kv sequence lengths bsz {kv_lengths.shape[0]}; \"\n f\" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}\"\n )\n\n # Modify if the shape of kv cahce is changed.\n block_size = k_cache.size(-1)\n # [bsz, 1, num_kv_heads, head_dim] -> [bsz, num_kv_heads, head_dim]\n k = k.squeeze(dim=1)\n\n num_warps = 8 if head_dim > 128 else 4\n\n grid = (bsz, num_kv_heads)\n _copy_to_kvcache_seqlen1_kernel[grid](\n k,\n k_cache,\n block_tables,\n kv_lengths,\n k.stride(0),\n k.stride(1),\n k.stride(2),\n k_cache.stride(0),\n k_cache.stride(1),\n k_cache.stride(2),\n k_cache.stride(3),\n block_tables.stride(0),\n block_tables.stride(1),\n block_size,\n HEAD_DIM=head_dim,\n num_warps=num_warps,\n )\n", "path": "colossalai/kernel/triton/kvcache_copy.py"}]}
| 1,345 | 948 |
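The record above patches an off-by-one in a Triton KV-cache copy kernel: for a sequence whose cache already holds `kv_len` tokens (including the one being written), the newest token lives at index `kv_len - 1`, not `kv_len`. A minimal, framework-free Python sketch of that indexing — names are illustrative, not taken from the dataset:

```python
def locate_write_slot(kv_len, block_size):
    """Map the cache length (tokens stored so far, including the newest
    one) to (block-table index, slot within that block)."""
    past_kv_seq_len = kv_len - 1  # 0-based position of the newest token
    return past_kv_seq_len // block_size, past_kv_seq_len % block_size

# With block_size=16, the 16th token belongs in block 0 at slot 15; the
# pre-patch arithmetic (kv_len // block_size) would point one block too far.
assert locate_write_slot(16, 16) == (0, 15)
assert locate_write_slot(17, 16) == (1, 0)
```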
gh_patches_debug_14637
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-124
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add public properties to google.oauth2.credentials.Credentials
Resolves #124
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/oauth2/credentials.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OAuth 2.0 Credentials.
16
17 This module provides credentials based on OAuth 2.0 access and refresh tokens.
18 These credentials usually access resources on behalf of a user (resource
19 owner).
20
21 Specifically, this is intended to use access tokens acquired using the
22 `Authorization Code grant`_ and can refresh those tokens using a
23 optional `refresh token`_.
24
25 Obtaining the initial access and refresh token is outside of the scope of this
26 module. Consult `rfc6749 section 4.1`_ for complete details on the
27 Authorization Code grant flow.
28
29 .. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
30 .. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
31 .. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
32 """
33
34 from google.auth import _helpers
35 from google.auth import credentials
36 from google.oauth2 import _client
37
38
39 class Credentials(credentials.Scoped, credentials.Credentials):
40 """Credentials using OAuth 2.0 access and refresh tokens."""
41
42 def __init__(self, token, refresh_token=None, token_uri=None,
43 client_id=None, client_secret=None, scopes=None):
44 """
45 Args:
46 token (Optional(str)): The OAuth 2.0 access token. Can be None
47 if refresh information is provided.
48 refresh_token (str): The OAuth 2.0 refresh token. If specified,
49 credentials can be refreshed.
50 token_uri (str): The OAuth 2.0 authorization server's token
51 endpoint URI. Must be specified for refresh, can be left as
52 None if the token can not be refreshed.
53 client_id (str): The OAuth 2.0 client ID. Must be specified for
54 refresh, can be left as None if the token can not be refreshed.
55 client_secret(str): The OAuth 2.0 client secret. Must be specified
56 for refresh, can be left as None if the token can not be
57 refreshed.
58 scopes (Sequence[str]): The scopes that were originally used
59 to obtain authorization. This is a purely informative parameter
60 that can be used by :meth:`has_scopes`. OAuth 2.0 credentials
61 can not request additional scopes after authorization.
62 """
63 super(Credentials, self).__init__()
64 self.token = token
65 self._refresh_token = refresh_token
66 self._scopes = scopes
67 self._token_uri = token_uri
68 self._client_id = client_id
69 self._client_secret = client_secret
70
71 @property
72 def requires_scopes(self):
73 """False: OAuth 2.0 credentials have their scopes set when
74 the initial token is requested and can not be changed."""
75 return False
76
77 def with_scopes(self, scopes):
78 """Unavailable, OAuth 2.0 credentials can not be re-scoped.
79
80 OAuth 2.0 credentials have their scopes set when the initial token is
81 requested and can not be changed.
82 """
83 raise NotImplementedError(
84 'OAuth 2.0 Credentials can not modify their scopes.')
85
86 @_helpers.copy_docstring(credentials.Credentials)
87 def refresh(self, request):
88 access_token, refresh_token, expiry, _ = _client.refresh_grant(
89 request, self._token_uri, self._refresh_token, self._client_id,
90 self._client_secret)
91
92 self.token = access_token
93 self.expiry = expiry
94 self._refresh_token = refresh_token
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py
--- a/google/oauth2/credentials.py
+++ b/google/oauth2/credentials.py
@@ -68,6 +68,27 @@
self._client_id = client_id
self._client_secret = client_secret
+ @property
+ def refresh_token(self):
+ """Optional[str]: The OAuth 2.0 refresh token."""
+ return self._refresh_token
+
+ @property
+ def token_uri(self):
+ """Optional[str]: The OAuth 2.0 authorization server's token endpoint
+ URI."""
+ return self._token_uri
+
+ @property
+ def client_id(self):
+ """Optional[str]: The OAuth 2.0 client ID."""
+ return self._client_id
+
+ @property
+ def client_secret(self):
+ """Optional[str]: The OAuth 2.0 client secret."""
+ return self._client_secret
+
@property
def requires_scopes(self):
"""False: OAuth 2.0 credentials have their scopes set when
|
{"golden_diff": "diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py\n--- a/google/oauth2/credentials.py\n+++ b/google/oauth2/credentials.py\n@@ -68,6 +68,27 @@\n self._client_id = client_id\n self._client_secret = client_secret\n \n+ @property\n+ def refresh_token(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 refresh token.\"\"\"\n+ return self._refresh_token\n+\n+ @property\n+ def token_uri(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 authorization server's token endpoint\n+ URI.\"\"\"\n+ return self._token_uri\n+\n+ @property\n+ def client_id(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n+ return self._client_id\n+\n+ @property\n+ def client_secret(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 client secret.\"\"\"\n+ return self._client_secret\n+\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n", "issue": "Add public properties to google.oauth2.credentials.Credentials\nResolves #124 \n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OAuth 2.0 Credentials.\n\nThis module provides credentials based on OAuth 2.0 access and refresh tokens.\nThese credentials usually access resources on behalf of a user (resource\nowner).\n\nSpecifically, this is intended to use access tokens acquired using the\n`Authorization Code grant`_ and can refresh those tokens using a\noptional `refresh token`_.\n\nObtaining the initial access and refresh token is outside of the scope of this\nmodule. Consult `rfc6749 section 4.1`_ for complete details on the\nAuthorization Code grant flow.\n\n.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1\n.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6\n.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1\n\"\"\"\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n\n def __init__(self, token, refresh_token=None, token_uri=None,\n client_id=None, client_secret=None, scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n client_id (str): The OAuth 2.0 client ID. Must be specified for\n refresh, can be left as None if the token can not be refreshed.\n client_secret(str): The OAuth 2.0 client secret. Must be specified\n for refresh, can be left as None if the token can not be\n refreshed.\n scopes (Sequence[str]): The scopes that were originally used\n to obtain authorization. 
This is a purely informative parameter\n that can be used by :meth:`has_scopes`. OAuth 2.0 credentials\n can not request additional scopes after authorization.\n \"\"\"\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n self._client_secret = client_secret\n\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n the initial token is requested and can not be changed.\"\"\"\n return False\n\n def with_scopes(self, scopes):\n \"\"\"Unavailable, OAuth 2.0 credentials can not be re-scoped.\n\n OAuth 2.0 credentials have their scopes set when the initial token is\n requested and can not be changed.\n \"\"\"\n raise NotImplementedError(\n 'OAuth 2.0 Credentials can not modify their scopes.')\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n access_token, refresh_token, expiry, _ = _client.refresh_grant(\n request, self._token_uri, self._refresh_token, self._client_id,\n self._client_secret)\n\n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n", "path": "google/oauth2/credentials.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OAuth 2.0 Credentials.\n\nThis module provides credentials based on OAuth 2.0 access and refresh tokens.\nThese credentials usually access resources on behalf of a user (resource\nowner).\n\nSpecifically, this is intended to use access tokens acquired using the\n`Authorization Code grant`_ and can refresh those tokens using a\noptional `refresh token`_.\n\nObtaining the initial access and refresh token is outside of the scope of this\nmodule. Consult `rfc6749 section 4.1`_ for complete details on the\nAuthorization Code grant flow.\n\n.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1\n.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6\n.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1\n\"\"\"\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n\n def __init__(self, token, refresh_token=None, token_uri=None,\n client_id=None, client_secret=None, scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n client_id (str): The OAuth 2.0 client ID. 
Must be specified for\n refresh, can be left as None if the token can not be refreshed.\n client_secret(str): The OAuth 2.0 client secret. Must be specified\n for refresh, can be left as None if the token can not be\n refreshed.\n scopes (Sequence[str]): The scopes that were originally used\n to obtain authorization. This is a purely informative parameter\n that can be used by :meth:`has_scopes`. OAuth 2.0 credentials\n can not request additional scopes after authorization.\n \"\"\"\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n self._client_secret = client_secret\n\n @property\n def refresh_token(self):\n \"\"\"Optional[str]: The OAuth 2.0 refresh token.\"\"\"\n return self._refresh_token\n\n @property\n def token_uri(self):\n \"\"\"Optional[str]: The OAuth 2.0 authorization server's token endpoint\n URI.\"\"\"\n return self._token_uri\n\n @property\n def client_id(self):\n \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n return self._client_id\n\n @property\n def client_secret(self):\n \"\"\"Optional[str]: The OAuth 2.0 client secret.\"\"\"\n return self._client_secret\n\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n the initial token is requested and can not be changed.\"\"\"\n return False\n\n def with_scopes(self, scopes):\n \"\"\"Unavailable, OAuth 2.0 credentials can not be re-scoped.\n\n OAuth 2.0 credentials have their scopes set when the initial token is\n requested and can not be changed.\n \"\"\"\n raise NotImplementedError(\n 'OAuth 2.0 Credentials can not modify their scopes.')\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n access_token, refresh_token, expiry, _ = _client.refresh_grant(\n request, self._token_uri, self._refresh_token, self._client_id,\n self._client_secret)\n\n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n", "path": "google/oauth2/credentials.py"}]}
| 1,356 | 245 |
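The patch above is the standard read-only property idiom: private `_`-prefixed storage exposed through `@property` accessors with no setters. A toy sketch of the same pattern — the class below is a stand-in, not the real google-auth `Credentials`:

```python
class TokenHolder:
    """Toy stand-in illustrating the read-only property idiom."""

    def __init__(self, refresh_token=None, token_uri=None):
        self._refresh_token = refresh_token  # private backing attributes
        self._token_uri = token_uri

    @property
    def refresh_token(self):
        """Optional[str]: exposed read-only; no setter is defined."""
        return self._refresh_token

    @property
    def token_uri(self):
        """Optional[str]: the token endpoint URI."""
        return self._token_uri


holder = TokenHolder(refresh_token="abc")
print(holder.refresh_token)     # -> abc
# holder.refresh_token = "new"  # would raise AttributeError
```

Because only the class can touch the `_`-prefixed attributes (e.g. during a refresh), callers get inspection without mutation.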
gh_patches_debug_5344
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-2822
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use plot_event in a example
The function `plot_event` has currently no example linked to its [doc](https://nilearn.github.io/modules/generated/nilearn.plotting.plot_event.html#nilearn.plotting.plot_event).
It wouldn't be too costly to use it in one example somewhere.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/04_glm_first_level/write_events_file.py`
Content:
```
1 """Example of a events.tsv file generation: the neurospin/localizer events.
2 =============================================================================
3
4 The protocol described is the so-called "archi standard" localizer
5 event sequence. See Pinel et al., BMC neuroscience 2007 for reference.
6 """
7
8 print(__doc__)
9
10 #########################################################################
11 # Define the onset times in seconds. Those are typically extracted
12 # from the stimulation software used.
13 import numpy as np
14 onset = np.array([
15 0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,
16 41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,
17 96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,
18 143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,
19 173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,
20 212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,
21 248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,
22 284.4, 288., 291., 293.4, 296.7])
23
24 #########################################################################
25 # Associated trial types: these are numbered between 0 and 9, hence
26 # correspond to 10 different conditions.
27 trial_idx = np.array(
28 [7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,
29 2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,
30 0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,
31 7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])
32
33 #########################################################################
34 # We may want to map these indices to explicit condition names.
35 # For that, we define a list of 10 strings.
36 condition_ids = ['horizontal checkerboard',
37 'vertical checkerboard',
38 'right button press, auditory instructions',
39 'left button press, auditory instructions',
40 'right button press, visual instructions',
41 'left button press, visual instructions',
42 'mental computation, auditory instructions',
43 'mental computation, visual instructions',
44 'visual sentence',
45 'auditory sentence']
46
47 trial_type = np.array([condition_ids[i] for i in trial_idx])
48
49 #########################################################################
50 # We also define a duration (required by BIDS conventions).
51 duration = np.ones_like(onset)
52
53
54 #########################################################################
55 # Form an event dataframe from these information.
56 import pandas as pd
57 events = pd.DataFrame({'trial_type': trial_type,
58 'onset': onset,
59 'duration': duration})
60
61 #########################################################################
62 # Export them to a tsv file.
63 tsvfile = 'localizer_events.tsv'
64 events.to_csv(tsvfile, sep='\t', index=False)
65 print("Created the events file in %s " % tsvfile)
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/04_glm_first_level/write_events_file.py b/examples/04_glm_first_level/write_events_file.py
--- a/examples/04_glm_first_level/write_events_file.py
+++ b/examples/04_glm_first_level/write_events_file.py
@@ -63,3 +63,10 @@
tsvfile = 'localizer_events.tsv'
events.to_csv(tsvfile, sep='\t', index=False)
print("Created the events file in %s " % tsvfile)
+
+#########################################################################
+# Optionally, the events can be visualized using the plot_event function.
+from matplotlib import pyplot as plt
+from nilearn.plotting import plot_event
+plot_event(events, figsize=(15, 5))
+plt.show()
|
{"golden_diff": "diff --git a/examples/04_glm_first_level/write_events_file.py b/examples/04_glm_first_level/write_events_file.py\n--- a/examples/04_glm_first_level/write_events_file.py\n+++ b/examples/04_glm_first_level/write_events_file.py\n@@ -63,3 +63,10 @@\n tsvfile = 'localizer_events.tsv'\n events.to_csv(tsvfile, sep='\\t', index=False)\n print(\"Created the events file in %s \" % tsvfile)\n+\n+#########################################################################\n+# Optionally, the events can be visualized using the plot_event function.\n+from matplotlib import pyplot as plt\n+from nilearn.plotting import plot_event\n+plot_event(events, figsize=(15, 5))\n+plt.show()\n", "issue": "Use plot_event in a example\nThe function `plot_event` has currently no example linked to its [doc](https://nilearn.github.io/modules/generated/nilearn.plotting.plot_event.html#nilearn.plotting.plot_event). \r\nIt wouldn't be too costly to use it in one example somewhere.\n", "before_files": [{"content": "\"\"\"Example of a events.tsv file generation: the neurospin/localizer events.\n=============================================================================\n\nThe protocol described is the so-called \"archi standard\" localizer\nevent sequence. See Pinel et al., BMC neuroscience 2007 for reference.\n\"\"\"\n\nprint(__doc__)\n\n#########################################################################\n# Define the onset times in seconds. Those are typically extracted\n# from the stimulation software used.\nimport numpy as np\nonset = np.array([\n 0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,\n 41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,\n 96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,\n 143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,\n 173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,\n 212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,\n 248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,\n 284.4, 288., 291., 293.4, 296.7])\n\n#########################################################################\n# Associated trial types: these are numbered between 0 and 9, hence\n# correspond to 10 different conditions.\ntrial_idx = np.array(\n [7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,\n 2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,\n 0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,\n 7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])\n\n#########################################################################\n# We may want to map these indices to explicit condition names.\n# For that, we define a list of 10 strings.\ncondition_ids = ['horizontal checkerboard',\n 'vertical checkerboard',\n 'right button press, auditory instructions',\n 'left button press, auditory instructions',\n 'right button press, visual instructions',\n 'left button press, visual instructions',\n 'mental computation, auditory instructions',\n 'mental computation, visual instructions',\n 'visual sentence',\n 'auditory sentence']\n\ntrial_type = np.array([condition_ids[i] for i in trial_idx])\n\n#########################################################################\n# We also define a duration (required by BIDS conventions).\nduration = np.ones_like(onset)\n\n\n#########################################################################\n# Form an event dataframe from these information.\nimport pandas as pd\nevents = pd.DataFrame({'trial_type': 
trial_type,\n 'onset': onset,\n 'duration': duration})\n\n#########################################################################\n# Export them to a tsv file.\ntsvfile = 'localizer_events.tsv'\nevents.to_csv(tsvfile, sep='\\t', index=False)\nprint(\"Created the events file in %s \" % tsvfile)\n", "path": "examples/04_glm_first_level/write_events_file.py"}], "after_files": [{"content": "\"\"\"Example of a events.tsv file generation: the neurospin/localizer events.\n=============================================================================\n\nThe protocol described is the so-called \"archi standard\" localizer\nevent sequence. See Pinel et al., BMC neuroscience 2007 for reference.\n\"\"\"\n\nprint(__doc__)\n\n#########################################################################\n# Define the onset times in seconds. Those are typically extracted\n# from the stimulation software used.\nimport numpy as np\nonset = np.array([\n 0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,\n 41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,\n 96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,\n 143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,\n 173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,\n 212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,\n 248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,\n 284.4, 288., 291., 293.4, 296.7])\n\n#########################################################################\n# Associated trial types: these are numbered between 0 and 9, hence\n# correspond to 10 different conditions.\ntrial_idx = np.array(\n [7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,\n 2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,\n 0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,\n 7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])\n\n#########################################################################\n# We may want to map these indices to explicit condition names.\n# For that, we define a list of 10 strings.\ncondition_ids = ['horizontal checkerboard',\n 'vertical checkerboard',\n 'right button press, auditory instructions',\n 'left button press, auditory instructions',\n 'right button press, visual instructions',\n 'left button press, visual instructions',\n 'mental computation, auditory instructions',\n 'mental computation, visual instructions',\n 'visual sentence',\n 'auditory sentence']\n\ntrial_type = np.array([condition_ids[i] for i in trial_idx])\n\n#########################################################################\n# We also define a duration (required by BIDS conventions).\nduration = np.ones_like(onset)\n\n\n#########################################################################\n# Form an event dataframe from these information.\nimport pandas as pd\nevents = pd.DataFrame({'trial_type': trial_type,\n 'onset': onset,\n 'duration': duration})\n\n#########################################################################\n# Export them to a tsv file.\ntsvfile = 'localizer_events.tsv'\nevents.to_csv(tsvfile, sep='\\t', index=False)\nprint(\"Created the events file in %s \" % tsvfile)\n\n#########################################################################\n# Optionally, the events can be visualized using the plot_event function.\nfrom matplotlib import pyplot as plt\nfrom nilearn.plotting import plot_event\nplot_event(events, figsize=(15, 5))\nplt.show()\n", "path": 
"examples/04_glm_first_level/write_events_file.py"}]}
| 1,552 | 167 |
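The fix above just appends a `plot_event` call to the tutorial script. A condensed usage sketch along the same lines (assumes `nilearn` and `matplotlib` are installed; the toy events below are made up, not the localizer data):

```python
import pandas as pd
from matplotlib import pyplot as plt
from nilearn.plotting import plot_event

# A tiny events table with the three BIDS-required columns.
events = pd.DataFrame({
    "trial_type": ["audio", "visual", "audio"],
    "onset": [0.0, 5.0, 10.0],      # seconds from scan start
    "duration": [1.0, 1.0, 1.0],
})
plot_event(events, figsize=(8, 3))  # one colored segment per trial
plt.show()
```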
gh_patches_debug_4117
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-6178
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MacOS: Clipboard nspaste make app crash when copying text
<!--
The issue tracker is a tool to address bugs.
Please use the #support Discord channel at https://chat.kivy.org/ or Stack Overflow for
support questions, more information at https://git.io/vM1yQ.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://git.io/vM1iE
* prepare a short, runnable example that reproduces the issue
* reproduce the problem with the latest development version of Kivy
* double-check that the issue is indeed a bug and not a support request
-->
### Versions
* Python: 3.7.1
* OS: MacOS 10.13.6
* Kivy: 1.10.1
* Kivy installation method: pypi
### Description
When I try copy text in TextInput, this make app crash. But paste is OK.
### Code and Logs
```log
Traceback (most recent call last):
File "main.py", line 56, in <module>
app.run()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/app.py", line 826, in run
runTouchApp()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/base.py", line 502, in runTouchApp
EventLoop.window.mainloop()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py", line 727, in mainloop
self._mainloop()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py", line 662, in _mainloop
self.modifiers):
File "kivy/_event.pyx", line 703, in kivy._event.EventDispatcher.dispatch
File "kivy/_event.pyx", line 1214, in kivy._event.EventObservers.dispatch
File "kivy/_event.pyx", line 1138, in kivy._event.EventObservers._dispatch
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/__init__.py", line 162, in _on_window_key_down
return self.dispatch('on_key_down', keycode, text, modifiers)
File "kivy/_event.pyx", line 703, in kivy._event.EventDispatcher.dispatch
File "kivy/_event.pyx", line 1214, in kivy._event.EventObservers.dispatch
File "kivy/_event.pyx", line 1138, in kivy._event.EventObservers._dispatch
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py", line 2434, in keyboard_on_key_down
self.copy()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py", line 1727, in copy
return Clipboard.copy(self.selection_text)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py", line 73, in copy
self._copy(data)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py", line 87, in _copy
self.put(data, self._clip_mime_type)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/clipboard_nspaste.py", line 40, in put
pb.writeObjects_([data])
File "pyobjus/pyobjus.pyx", line 393, in pyobjus.ObjcMethod.__call__
File "pyobjus/pyobjus_conversions.pxi", line 617, in pyobjus.convert_py_arg_to_cy
File "pyobjus/pyobjus_conversions.pxi", line 441, in pyobjus.convert_py_to_nsobject
File "pyobjus/pyobjus.pyx", line 393, in pyobjus.ObjcMethod.__call__
File "pyobjus/pyobjus_conversions.pxi", line 617, in pyobjus.convert_py_arg_to_cy
File "pyobjus/pyobjus_conversions.pxi", line 452, in pyobjus.convert_py_to_nsobject
File "pyobjus/pyobjus.pyx", line 974, in pyobjus.objc_create_delegate
pyobjus.ObjcException: You've passed b'kivyproject' as delegate, but there is no @protocol methods declared.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/clipboard/clipboard_nspaste.py`
Content:
```
1 '''
2 Clipboard OsX: implementation of clipboard using Appkit
3 '''
4
5 __all__ = ('ClipboardNSPaste', )
6
7 from kivy.core.clipboard import ClipboardBase
8 from kivy.utils import platform
9
10 if platform != 'macosx':
11 raise SystemError('Unsupported platform for appkit clipboard.')
12 try:
13 from pyobjus import autoclass
14 from pyobjus.dylib_manager import load_framework, INCLUDE
15 load_framework(INCLUDE.AppKit)
16 except ImportError:
17 raise SystemError('Pyobjus not installed. Please run the following'
18 ' command to install it. `pip install --user pyobjus`')
19
20 NSPasteboard = autoclass('NSPasteboard')
21 NSString = autoclass('NSString')
22
23
24 class ClipboardNSPaste(ClipboardBase):
25
26 def __init__(self):
27 super(ClipboardNSPaste, self).__init__()
28 self._clipboard = NSPasteboard.generalPasteboard()
29
30 def get(self, mimetype='text/plain'):
31 pb = self._clipboard
32 data = pb.stringForType_('public.utf8-plain-text')
33 if not data:
34 return ""
35 return data.UTF8String()
36
37 def put(self, data, mimetype='text/plain'):
38 pb = self._clipboard
39 pb.clearContents()
40 pb.writeObjects_([data])
41
42 def get_types(self):
43 return list('text/plain',)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/core/clipboard/clipboard_nspaste.py b/kivy/core/clipboard/clipboard_nspaste.py
--- a/kivy/core/clipboard/clipboard_nspaste.py
+++ b/kivy/core/clipboard/clipboard_nspaste.py
@@ -37,7 +37,8 @@
def put(self, data, mimetype='text/plain'):
pb = self._clipboard
pb.clearContents()
- pb.writeObjects_([data])
+ utf8 = NSString.alloc().initWithUTF8String_(data)
+ pb.setString_forType_(utf8, 'public.utf8-plain-text')
def get_types(self):
return list('text/plain',)
|
{"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_nspaste.py b/kivy/core/clipboard/clipboard_nspaste.py\n--- a/kivy/core/clipboard/clipboard_nspaste.py\n+++ b/kivy/core/clipboard/clipboard_nspaste.py\n@@ -37,7 +37,8 @@\n def put(self, data, mimetype='text/plain'):\n pb = self._clipboard\n pb.clearContents()\n- pb.writeObjects_([data])\n+ utf8 = NSString.alloc().initWithUTF8String_(data)\n+ pb.setString_forType_(utf8, 'public.utf8-plain-text')\n \n def get_types(self):\n return list('text/plain',)\n", "issue": "MacOS: Clipboard nspaste make app crash when copying text\n<!--\r\nThe issue tracker is a tool to address bugs.\r\nPlease use the #support Discord channel at https://chat.kivy.org/ or Stack Overflow for\r\nsupport questions, more information at https://git.io/vM1yQ.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://git.io/vM1iE\r\n * prepare a short, runnable example that reproduces the issue\r\n * reproduce the problem with the latest development version of Kivy\r\n * double-check that the issue is indeed a bug and not a support request\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.7.1\r\n* OS: MacOS 10.13.6\r\n* Kivy: 1.10.1\r\n* Kivy installation method: pypi\r\n\r\n### Description\r\n\r\nWhen I try copy text in TextInput, this make app crash. But paste is OK.\r\n\r\n### Code and Logs\r\n\r\n```log\r\nTraceback (most recent call last):\r\n File \"main.py\", line 56, in <module>\r\n app.run()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/app.py\", line 826, in run\r\n runTouchApp()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/base.py\", line 502, in runTouchApp\r\n EventLoop.window.mainloop()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py\", line 727, in mainloop\r\n self._mainloop()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py\", line 662, in _mainloop\r\n self.modifiers):\r\n File \"kivy/_event.pyx\", line 703, in kivy._event.EventDispatcher.dispatch\r\n File \"kivy/_event.pyx\", line 1214, in kivy._event.EventObservers.dispatch\r\n File \"kivy/_event.pyx\", line 1138, in kivy._event.EventObservers._dispatch\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/__init__.py\", line 162, in _on_window_key_down\r\n return self.dispatch('on_key_down', keycode, text, modifiers)\r\n File \"kivy/_event.pyx\", line 703, in kivy._event.EventDispatcher.dispatch\r\n File \"kivy/_event.pyx\", line 1214, in kivy._event.EventObservers.dispatch\r\n File \"kivy/_event.pyx\", line 1138, in kivy._event.EventObservers._dispatch\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py\", line 2434, in keyboard_on_key_down\r\n self.copy()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py\", line 1727, in copy\r\n return Clipboard.copy(self.selection_text)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py\", line 73, in copy\r\n self._copy(data)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py\", line 87, in _copy\r\n self.put(data, self._clip_mime_type)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/clipboard_nspaste.py\", line 40, in put\r\n pb.writeObjects_([data])\r\n File \"pyobjus/pyobjus.pyx\", line 393, in pyobjus.ObjcMethod.__call__\r\n File \"pyobjus/pyobjus_conversions.pxi\", 
line 617, in pyobjus.convert_py_arg_to_cy\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 441, in pyobjus.convert_py_to_nsobject\r\n File \"pyobjus/pyobjus.pyx\", line 393, in pyobjus.ObjcMethod.__call__\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 617, in pyobjus.convert_py_arg_to_cy\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 452, in pyobjus.convert_py_to_nsobject\r\n File \"pyobjus/pyobjus.pyx\", line 974, in pyobjus.objc_create_delegate\r\n pyobjus.ObjcException: You've passed b'kivyproject' as delegate, but there is no @protocol methods declared.\r\n```\r\n\n", "before_files": [{"content": "'''\nClipboard OsX: implementation of clipboard using Appkit\n'''\n\n__all__ = ('ClipboardNSPaste', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom kivy.utils import platform\n\nif platform != 'macosx':\n raise SystemError('Unsupported platform for appkit clipboard.')\ntry:\n from pyobjus import autoclass\n from pyobjus.dylib_manager import load_framework, INCLUDE\n load_framework(INCLUDE.AppKit)\nexcept ImportError:\n raise SystemError('Pyobjus not installed. Please run the following'\n ' command to install it. `pip install --user pyobjus`')\n\nNSPasteboard = autoclass('NSPasteboard')\nNSString = autoclass('NSString')\n\n\nclass ClipboardNSPaste(ClipboardBase):\n\n def __init__(self):\n super(ClipboardNSPaste, self).__init__()\n self._clipboard = NSPasteboard.generalPasteboard()\n\n def get(self, mimetype='text/plain'):\n pb = self._clipboard\n data = pb.stringForType_('public.utf8-plain-text')\n if not data:\n return \"\"\n return data.UTF8String()\n\n def put(self, data, mimetype='text/plain'):\n pb = self._clipboard\n pb.clearContents()\n pb.writeObjects_([data])\n\n def get_types(self):\n return list('text/plain',)\n", "path": "kivy/core/clipboard/clipboard_nspaste.py"}], "after_files": [{"content": "'''\nClipboard OsX: implementation of clipboard using Appkit\n'''\n\n__all__ = ('ClipboardNSPaste', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom kivy.utils import platform\n\nif platform != 'macosx':\n raise SystemError('Unsupported platform for appkit clipboard.')\ntry:\n from pyobjus import autoclass\n from pyobjus.dylib_manager import load_framework, INCLUDE\n load_framework(INCLUDE.AppKit)\nexcept ImportError:\n raise SystemError('Pyobjus not installed. Please run the following'\n ' command to install it. `pip install --user pyobjus`')\n\nNSPasteboard = autoclass('NSPasteboard')\nNSString = autoclass('NSString')\n\n\nclass ClipboardNSPaste(ClipboardBase):\n\n def __init__(self):\n super(ClipboardNSPaste, self).__init__()\n self._clipboard = NSPasteboard.generalPasteboard()\n\n def get(self, mimetype='text/plain'):\n pb = self._clipboard\n data = pb.stringForType_('public.utf8-plain-text')\n if not data:\n return \"\"\n return data.UTF8String()\n\n def put(self, data, mimetype='text/plain'):\n pb = self._clipboard\n pb.clearContents()\n utf8 = NSString.alloc().initWithUTF8String_(data)\n pb.setString_forType_(utf8, 'public.utf8-plain-text')\n\n def get_types(self):\n return list('text/plain',)\n", "path": "kivy/core/clipboard/clipboard_nspaste.py"}]}
| 1,723 | 148 |
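The crash above comes from handing a raw Python object to `writeObjects_`, which makes pyobjus try to build an Objective-C delegate for it; the patch instead wraps the text in an `NSString` and stores it with `setString_forType_`. A macOS-only sketch of that call sequence, mirroring the golden diff (requires `pyobjus`; not runnable on other platforms):

```python
from pyobjus import autoclass
from pyobjus.dylib_manager import load_framework, INCLUDE

load_framework(INCLUDE.AppKit)
NSPasteboard = autoclass("NSPasteboard")
NSString = autoclass("NSString")

pb = NSPasteboard.generalPasteboard()
pb.clearContents()
# Convert the Python string to an NSString first; passing a plain object
# to writeObjects_ is what triggered the delegate-creation exception.
utf8 = NSString.alloc().initWithUTF8String_("copied text")
pb.setString_forType_(utf8, "public.utf8-plain-text")
```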
gh_patches_debug_33844
|
rasdani/github-patches
|
git_diff
|
getredash__redash-4354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make Cypress tests work with [email protected]
Running our tests with [email protected] doesn't work. Need to figure out what happened, until then pinning the version to 3.4.1 (#4284).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/app.py`
Content:
```
1 from flask import Flask
2 from werkzeug.contrib.fixers import ProxyFix
3
4 from . import settings
5
6
7 class Redash(Flask):
8 """A custom Flask app for Redash"""
9 def __init__(self, *args, **kwargs):
10 kwargs.update({
11 'template_folder': settings.STATIC_ASSETS_PATH,
12 'static_folder': settings.STATIC_ASSETS_PATH,
13 'static_url_path': '/static',
14 })
15 super(Redash, self).__init__(__name__, *args, **kwargs)
16 # Make sure we get the right referral address even behind proxies like nginx.
17 self.wsgi_app = ProxyFix(self.wsgi_app, settings.PROXIES_COUNT)
18 # Configure Redash using our settings
19 self.config.from_object('redash.settings')
20
21
22 def create_app():
23 from . import authentication, extensions, handlers, limiter, mail, migrate, security
24 from .handlers import chrome_logger
25 from .handlers.webpack import configure_webpack
26 from .metrics import request as request_metrics
27 from .models import db, users
28 from .utils import sentry
29 from .version_check import reset_new_version_status
30
31 sentry.init()
32 app = Redash()
33
34 # Check and update the cached version for use by the client
35 app.before_first_request(reset_new_version_status)
36
37 security.init_app(app)
38 request_metrics.init_app(app)
39 db.init_app(app)
40 migrate.init_app(app, db)
41 mail.init_app(app)
42 authentication.init_app(app)
43 limiter.init_app(app)
44 handlers.init_app(app)
45 configure_webpack(app)
46 extensions.init_app(app)
47 chrome_logger.init_app(app)
48 users.init_app(app)
49
50 return app
51
```
Path: `redash/handlers/chrome_logger.py`
Content:
```
1 import time
2 import chromelogger
3 from flask import g, request
4 from flask_sqlalchemy import get_debug_queries
5
6
7 def log_queries():
8 total_duration = 0.0
9 queries_count = 0
10
11 chromelogger.group("SQL Queries")
12
13 for q in get_debug_queries():
14 total_duration += q.duration
15 queries_count += 1
16 chromelogger.info(q.statement % q.parameters)
17 chromelogger.info("Runtime: {:.2f}ms".format(1000 * q.duration))
18
19 chromelogger.info("{} queries executed in {:.2f}ms.".format(queries_count, total_duration*1000))
20
21 chromelogger.group_end("SQL Queries")
22
23
24 def chrome_log(response):
25 request_duration = (time.time() - g.start_time) * 1000
26 queries_duration = g.get('queries_duration', 0.0)
27 queries_count = g.get('queries_count', 0)
28
29 group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(
30 request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)
31
32 chromelogger.group_collapsed(group_name)
33
34 endpoint = (request.endpoint or 'unknown').replace('.', '_')
35 chromelogger.info('Endpoint: {}'.format(endpoint))
36 chromelogger.info('Content Type: {}'.format(response.content_type))
37 chromelogger.info('Content Length: {}'.format(response.content_length or -1))
38
39 log_queries()
40
41 chromelogger.group_end(group_name)
42
43 header = chromelogger.get_header()
44 if header is not None:
45 response.headers.add(*header)
46
47 return response
48
49
50 def init_app(app):
51 if not app.debug:
52 return
53
54 app.after_request(chrome_log)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/app.py b/redash/app.py
--- a/redash/app.py
+++ b/redash/app.py
@@ -21,7 +21,6 @@
def create_app():
from . import authentication, extensions, handlers, limiter, mail, migrate, security
- from .handlers import chrome_logger
from .handlers.webpack import configure_webpack
from .metrics import request as request_metrics
from .models import db, users
@@ -44,7 +43,6 @@
handlers.init_app(app)
configure_webpack(app)
extensions.init_app(app)
- chrome_logger.init_app(app)
users.init_app(app)
return app
diff --git a/redash/handlers/chrome_logger.py b/redash/handlers/chrome_logger.py
deleted file mode 100644
--- a/redash/handlers/chrome_logger.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import time
-import chromelogger
-from flask import g, request
-from flask_sqlalchemy import get_debug_queries
-
-
-def log_queries():
- total_duration = 0.0
- queries_count = 0
-
- chromelogger.group("SQL Queries")
-
- for q in get_debug_queries():
- total_duration += q.duration
- queries_count += 1
- chromelogger.info(q.statement % q.parameters)
- chromelogger.info("Runtime: {:.2f}ms".format(1000 * q.duration))
-
- chromelogger.info("{} queries executed in {:.2f}ms.".format(queries_count, total_duration*1000))
-
- chromelogger.group_end("SQL Queries")
-
-
-def chrome_log(response):
- request_duration = (time.time() - g.start_time) * 1000
- queries_duration = g.get('queries_duration', 0.0)
- queries_count = g.get('queries_count', 0)
-
- group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(
- request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)
-
- chromelogger.group_collapsed(group_name)
-
- endpoint = (request.endpoint or 'unknown').replace('.', '_')
- chromelogger.info('Endpoint: {}'.format(endpoint))
- chromelogger.info('Content Type: {}'.format(response.content_type))
- chromelogger.info('Content Length: {}'.format(response.content_length or -1))
-
- log_queries()
-
- chromelogger.group_end(group_name)
-
- header = chromelogger.get_header()
- if header is not None:
- response.headers.add(*header)
-
- return response
-
-
-def init_app(app):
- if not app.debug:
- return
-
- app.after_request(chrome_log)
|
{"golden_diff": "diff --git a/redash/app.py b/redash/app.py\n--- a/redash/app.py\n+++ b/redash/app.py\n@@ -21,7 +21,6 @@\n \n def create_app():\n from . import authentication, extensions, handlers, limiter, mail, migrate, security\n- from .handlers import chrome_logger\n from .handlers.webpack import configure_webpack\n from .metrics import request as request_metrics\n from .models import db, users\n@@ -44,7 +43,6 @@\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_app(app)\n- chrome_logger.init_app(app)\n users.init_app(app)\n \n return app\ndiff --git a/redash/handlers/chrome_logger.py b/redash/handlers/chrome_logger.py\ndeleted file mode 100644\n--- a/redash/handlers/chrome_logger.py\n+++ /dev/null\n@@ -1,54 +0,0 @@\n-import time\n-import chromelogger\n-from flask import g, request\n-from flask_sqlalchemy import get_debug_queries\n-\n-\n-def log_queries():\n- total_duration = 0.0\n- queries_count = 0\n-\n- chromelogger.group(\"SQL Queries\")\n-\n- for q in get_debug_queries():\n- total_duration += q.duration\n- queries_count += 1\n- chromelogger.info(q.statement % q.parameters)\n- chromelogger.info(\"Runtime: {:.2f}ms\".format(1000 * q.duration))\n-\n- chromelogger.info(\"{} queries executed in {:.2f}ms.\".format(queries_count, total_duration*1000))\n-\n- chromelogger.group_end(\"SQL Queries\")\n-\n-\n-def chrome_log(response):\n- request_duration = (time.time() - g.start_time) * 1000\n- queries_duration = g.get('queries_duration', 0.0)\n- queries_count = g.get('queries_count', 0)\n-\n- group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(\n- request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)\n-\n- chromelogger.group_collapsed(group_name)\n-\n- endpoint = (request.endpoint or 'unknown').replace('.', '_')\n- chromelogger.info('Endpoint: {}'.format(endpoint))\n- chromelogger.info('Content Type: {}'.format(response.content_type))\n- chromelogger.info('Content Length: {}'.format(response.content_length or -1))\n-\n- log_queries()\n-\n- chromelogger.group_end(group_name)\n-\n- header = chromelogger.get_header()\n- if header is not None:\n- response.headers.add(*header)\n-\n- return response\n-\n-\n-def init_app(app):\n- if not app.debug:\n- return\n-\n- app.after_request(chrome_log)\n", "issue": "Make Cypress tests work with [email protected]\nRunning our tests with [email protected] doesn't work. Need to figure out what happened, until then pinning the version to 3.4.1 (#4284).\n", "before_files": [{"content": "from flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\n\nfrom . import settings\n\n\nclass Redash(Flask):\n \"\"\"A custom Flask app for Redash\"\"\"\n def __init__(self, *args, **kwargs):\n kwargs.update({\n 'template_folder': settings.STATIC_ASSETS_PATH,\n 'static_folder': settings.STATIC_ASSETS_PATH,\n 'static_url_path': '/static',\n })\n super(Redash, self).__init__(__name__, *args, **kwargs)\n # Make sure we get the right referral address even behind proxies like nginx.\n self.wsgi_app = ProxyFix(self.wsgi_app, settings.PROXIES_COUNT)\n # Configure Redash using our settings\n self.config.from_object('redash.settings')\n\n\ndef create_app():\n from . 
import authentication, extensions, handlers, limiter, mail, migrate, security\n from .handlers import chrome_logger\n from .handlers.webpack import configure_webpack\n from .metrics import request as request_metrics\n from .models import db, users\n from .utils import sentry\n from .version_check import reset_new_version_status\n\n sentry.init()\n app = Redash()\n\n # Check and update the cached version for use by the client\n app.before_first_request(reset_new_version_status)\n\n security.init_app(app)\n request_metrics.init_app(app)\n db.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n authentication.init_app(app)\n limiter.init_app(app)\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_app(app)\n chrome_logger.init_app(app)\n users.init_app(app)\n\n return app\n", "path": "redash/app.py"}, {"content": "import time\nimport chromelogger\nfrom flask import g, request\nfrom flask_sqlalchemy import get_debug_queries\n\n\ndef log_queries():\n total_duration = 0.0\n queries_count = 0\n\n chromelogger.group(\"SQL Queries\")\n\n for q in get_debug_queries():\n total_duration += q.duration\n queries_count += 1\n chromelogger.info(q.statement % q.parameters)\n chromelogger.info(\"Runtime: {:.2f}ms\".format(1000 * q.duration))\n\n chromelogger.info(\"{} queries executed in {:.2f}ms.\".format(queries_count, total_duration*1000))\n\n chromelogger.group_end(\"SQL Queries\")\n\n\ndef chrome_log(response):\n request_duration = (time.time() - g.start_time) * 1000\n queries_duration = g.get('queries_duration', 0.0)\n queries_count = g.get('queries_count', 0)\n\n group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(\n request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)\n\n chromelogger.group_collapsed(group_name)\n\n endpoint = (request.endpoint or 'unknown').replace('.', '_')\n chromelogger.info('Endpoint: {}'.format(endpoint))\n chromelogger.info('Content Type: {}'.format(response.content_type))\n chromelogger.info('Content Length: {}'.format(response.content_length or -1))\n\n log_queries()\n\n chromelogger.group_end(group_name)\n\n header = chromelogger.get_header()\n if header is not None:\n response.headers.add(*header)\n\n return response\n\n\ndef init_app(app):\n if not app.debug:\n return\n\n app.after_request(chrome_log)\n", "path": "redash/handlers/chrome_logger.py"}], "after_files": [{"content": "from flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\n\nfrom . import settings\n\n\nclass Redash(Flask):\n \"\"\"A custom Flask app for Redash\"\"\"\n def __init__(self, *args, **kwargs):\n kwargs.update({\n 'template_folder': settings.STATIC_ASSETS_PATH,\n 'static_folder': settings.STATIC_ASSETS_PATH,\n 'static_url_path': '/static',\n })\n super(Redash, self).__init__(__name__, *args, **kwargs)\n # Make sure we get the right referral address even behind proxies like nginx.\n self.wsgi_app = ProxyFix(self.wsgi_app, settings.PROXIES_COUNT)\n # Configure Redash using our settings\n self.config.from_object('redash.settings')\n\n\ndef create_app():\n from . 
import authentication, extensions, handlers, limiter, mail, migrate, security\n from .handlers.webpack import configure_webpack\n from .metrics import request as request_metrics\n from .models import db, users\n from .utils import sentry\n from .version_check import reset_new_version_status\n\n sentry.init()\n app = Redash()\n\n # Check and update the cached version for use by the client\n app.before_first_request(reset_new_version_status)\n\n security.init_app(app)\n request_metrics.init_app(app)\n db.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n authentication.init_app(app)\n limiter.init_app(app)\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_app(app)\n users.init_app(app)\n\n return app\n", "path": "redash/app.py"}, {"content": null, "path": "redash/handlers/chrome_logger.py"}]}
| 1,291 | 642 |
gh_patches_debug_7863
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-1363
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Nevergrad-Plugin] Add support for Python 3.9
Python 3.9 support pending on scikit 2.4.0 release. Relevant comment: scikit-learn/scikit-learn#18621 (comment)
Related to #1062
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_nevergrad_sweeper/setup.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from setuptools import find_namespace_packages, setup
4
5 with open("README.md", "r") as fh:
6 LONG_DESC = fh.read()
7 setup(
8 name="hydra-nevergrad-sweeper",
9 version="1.1.0rc1",
10 author="Jeremy Rapin, Omry Yadan, Jieru Hu",
11 author_email="[email protected], [email protected], [email protected]",
12 description="Hydra Nevergrad Sweeper plugin",
13 long_description=LONG_DESC,
14 long_description_content_type="text/markdown",
15 url="https://github.com/facebookresearch/hydra/",
16 packages=find_namespace_packages(include=["hydra_plugins.*"]),
17 classifiers=[
18 "License :: OSI Approved :: MIT License",
19 "Programming Language :: Python :: 3.6",
20 "Programming Language :: Python :: 3.7",
21 "Programming Language :: Python :: 3.8",
22 # "Programming Language :: Python :: 3.9",
23 "Operating System :: OS Independent",
24 "Development Status :: 4 - Beta",
25 ],
26 install_requires=["hydra-core>=1.0.0", "nevergrad>=0.4.1.post4"],
27 include_package_data=True,
28 )
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py
--- a/plugins/hydra_nevergrad_sweeper/setup.py
+++ b/plugins/hydra_nevergrad_sweeper/setup.py
@@ -19,7 +19,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
- # "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
],
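Trove classifiers like the one uncommented above are purely informational metadata; if enforcement is also wanted, setuptools' `python_requires` is the usual companion knob. A hedged sketch with a hypothetical package name and versions:

```python
from setuptools import setup

setup(
    name="example-plugin",  # hypothetical package
    version="0.1.0",
    # unlike classifiers, this is enforced: pip refuses unsupported interpreters
    python_requires=">=3.6",
    classifiers=[
        # informational metadata rendered on PyPI
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
```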
|
{"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py\n--- a/plugins/hydra_nevergrad_sweeper/setup.py\n+++ b/plugins/hydra_nevergrad_sweeper/setup.py\n@@ -19,7 +19,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n", "issue": "[Nevergrad-Plugin] Add support for Python 3.9\nPython 3.9 support pending on scikit 2.4.0 release. Relevant comment: scikit-learn/scikit-learn#18621 (comment)\r\n\r\nRelated to #1062\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-nevergrad-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Jeremy Rapin, Omry Yadan, Jieru Hu\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Hydra Nevergrad Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"nevergrad>=0.4.1.post4\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_nevergrad_sweeper/setup.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-nevergrad-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Jeremy Rapin, Omry Yadan, Jieru Hu\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Hydra Nevergrad Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"nevergrad>=0.4.1.post4\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_nevergrad_sweeper/setup.py"}]}
| 663 | 155 |
gh_patches_debug_13339
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-1491
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[requires.io] dependency update on master branch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/celery.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import celery.backends
14
15 # We need to trick Celery into supporting rediss:// URLs which is how redis-py
16 # signals that you should use Redis with TLS.
17 celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
18
19 from celery import Celery, Task
20 from celery.backends.redis import RedisBackend as _RedisBackend
21 from celery.signals import celeryd_init
22 from pyramid import scripting
23 from pyramid.threadlocal import get_current_request
24 from raven.contrib.celery import register_signal, register_logger_signal
25
26 from warehouse.config import Environment, configure
27
28
29 @celeryd_init.connect
30 def _configure_celery(*args, **kwargs):
31 config = configure()
32 register_logger_signal(config.registry["raven.client"])
33 register_signal(config.registry["raven.client"])
34
35
36 class TLSRedisBackend(_RedisBackend):
37
38 def _params_from_url(self, url, defaults):
39 params = super()._params_from_url(url, defaults)
40 params.update({"connection_class": self.redis.SSLConnection})
41 return params
42
43
44 class WarehouseTask(Task):
45
46 abstract = True
47
48 def __call__(self, *args, **kwargs):
49 registry = self.app.pyramid_config.registry
50 pyramid_env = scripting.prepare(registry=registry)
51
52 try:
53 return super().__call__(pyramid_env["request"], *args, **kwargs)
54 finally:
55 pyramid_env["closer"]()
56
57 def apply_async(self, *args, **kwargs):
58 # The API design of Celery makes this threadlocal pretty impossible to
59 # avoid :(
60 request = get_current_request()
61
62 # If for whatever reason we were unable to get a request we'll just
63 # skip this and call the original method to send this immediately.
64 if request is None or not hasattr(request, "tm"):
65 return super().apply_async(*args, **kwargs)
66
67 # This will break things that expect to get an AsyncResult because
68 # we're no longer going to be returning an async result from this when
69 # called from within a request, response cycle. Ideally we shouldn't be
70 # waiting for responses in a request/response cycle anyways though.
71 request.tm.get().addAfterCommitHook(
72 self._after_commit_hook,
73 args=args,
74 kws=kwargs,
75 )
76
77 def _after_commit_hook(self, success, *args, **kwargs):
78 if success:
79 super().apply_async(*args, **kwargs)
80
81
82 app = Celery("warehouse")
83 app.Task = WarehouseTask
84
85
86 task = app.task
87
88
89 def includeme(config):
90 s = config.registry.settings
91 app.pyramid_config = config
92 app.conf.update(
93 BROKER_URL=s["celery.broker_url"],
94 BROKER_USE_SSL=s["warehouse.env"] == Environment.production,
95 CELERY_DISABLE_RATE_LIMITS=True,
96 CELERY_RESULT_BACKEND=s["celery.result_url"],
97 CELERY_RESULT_SERIALIZER="json",
98 CELERY_TASK_SERIALIZER="json",
99 CELERY_ACCEPT_CONTENT=["json", "msgpack"],
100 CELERY_MESSAGE_COMPRESSION="gzip",
101 CELERY_QUEUE_HA_POLICY="all",
102 )
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/celery.py b/warehouse/celery.py
--- a/warehouse/celery.py
+++ b/warehouse/celery.py
@@ -10,11 +10,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import celery.backends
+import celery.app.backends
# We need to trick Celery into supporting rediss:// URLs which is how redis-py
# signals that you should use Redis with TLS.
-celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
+celery.app.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
from celery import Celery, Task
from celery.backends.redis import RedisBackend as _RedisBackend
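The patch only moves where the alias table lives (`celery.backends` vs `celery.app.backends` across Celery versions); the mechanism it relies on is that redis-py treats the `rediss://` scheme as TLS. A small sketch, with a hypothetical endpoint, showing that `from_url` picks an SSL connection class for that scheme:

```python
import redis

# hypothetical host/port; the double "s" in rediss:// requests TLS
client = redis.Redis.from_url("rediss://example.com:6380/0")

# redis-py selects SSLConnection for rediss:// URLs
print(client.connection_pool.connection_class)
```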
|
{"golden_diff": "diff --git a/warehouse/celery.py b/warehouse/celery.py\n--- a/warehouse/celery.py\n+++ b/warehouse/celery.py\n@@ -10,11 +10,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import celery.backends\n+import celery.app.backends\n \n # We need to trick Celery into supporting rediss:// URLs which is how redis-py\n # signals that you should use Redis with TLS.\n-celery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n+celery.app.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n \n from celery import Celery, Task\n from celery.backends.redis import RedisBackend as _RedisBackend\n", "issue": "[requires.io] dependency update on master branch\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\nfrom raven.contrib.celery import register_signal, register_logger_signal\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n config = configure()\n register_logger_signal(config.registry[\"raven.client\"])\n register_signal(config.registry[\"raven.client\"])\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.app.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.app.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\nfrom raven.contrib.celery import register_signal, register_logger_signal\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n config = configure()\n register_logger_signal(config.registry[\"raven.client\"])\n register_signal(config.registry[\"raven.client\"])\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}]}
| 1,269 | 188 |
gh_patches_debug_17543
|
rasdani/github-patches
|
git_diff
|
coreruleset__coreruleset-3002
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move data files from util/regexp-assemble directory to the top level
### Description
Data files used to generate regular expressions have been somehow in a difficult-to-find place, dependent on the tool.
Now with the new crs-toolchain, this is not needed anymore.
So let's move the data files to the top level directory.
### Requirements
- move all data files to the top level dir
- review dependencies and check that all references are updated
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `util/regexp-assemble/lib/context.py`
Content:
```
1 import argparse
2 from pathlib import Path
3 import logging
4
5
6
7 class Context(object):
8 def __init__(self, root_directory: Path, namespace: argparse.Namespace=None):
9 self.root_directory = root_directory
10 self.rules_directory = self.root_directory / "rules"
11 self.util_directory = self.root_directory / "util"
12 self.regexp_assemble_directory = self.util_directory / "regexp-assemble"
13 self.data_files_directory = self.regexp_assemble_directory / "data"
14 self.include_files_directory = self.regexp_assemble_directory / "data" / "include"
15 self.regexp_assemble_pl_path = self.regexp_assemble_directory / "lib" / "regexp-assemble.pl"
16 self.single_rule_id = namespace.rule_id if namespace else None
17 self.single_chain_offset = None
18 if namespace and "chain_offset" in namespace:
19 self.single_chain_offset = namespace.chain_offset
20
21 self._dump_to_debug_log()
22
23 assert (
24 self.rules_directory.exists()
25 and self.util_directory.exists()
26 and self.regexp_assemble_directory.exists()
27 and self.data_files_directory.exists()
28 and self.include_files_directory.exists()
29 )
30
31
32 def _dump_to_debug_log(self):
33 logger = logging.getLogger()
34 logger.debug("Root directory: %s", self.root_directory)
35 logger.debug("Rules directory: %s", self.rules_directory)
36 logger.debug("Data files directory: %s", self.data_files_directory)
37 logger.debug("Include files directory: %s", self.include_files_directory)
38 logger.debug("Parsed rule ID: %s", self.single_rule_id)
39 logger.debug("Parsed chain offset: %s", self.single_chain_offset)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/util/regexp-assemble/lib/context.py b/util/regexp-assemble/lib/context.py
--- a/util/regexp-assemble/lib/context.py
+++ b/util/regexp-assemble/lib/context.py
@@ -10,8 +10,8 @@
self.rules_directory = self.root_directory / "rules"
self.util_directory = self.root_directory / "util"
self.regexp_assemble_directory = self.util_directory / "regexp-assemble"
- self.data_files_directory = self.regexp_assemble_directory / "data"
- self.include_files_directory = self.regexp_assemble_directory / "data" / "include"
+ self.data_files_directory = self.root_directory / "data"
+ self.include_files_directory = self.root_directory / "data" / "include"
self.regexp_assemble_pl_path = self.regexp_assemble_directory / "lib" / "regexp-assemble.pl"
self.single_rule_id = namespace.rule_id if namespace else None
self.single_chain_offset = None
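A quick, hypothetical `pathlib` sketch of the relocated layout, mirroring the existence checks that `Context` asserts (the checkout path is an assumption):

```python
from pathlib import Path

root = Path("/opt/coreruleset")  # hypothetical checkout location
data = root / "data"             # data files now live at the top level
include = data / "include"

for directory in (root / "rules", root / "util", data, include):
    # Context asserts these all exist; a False here means the move is incomplete
    print(directory, directory.exists())
```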
|
{"golden_diff": "diff --git a/util/regexp-assemble/lib/context.py b/util/regexp-assemble/lib/context.py\n--- a/util/regexp-assemble/lib/context.py\n+++ b/util/regexp-assemble/lib/context.py\n@@ -10,8 +10,8 @@\n self.rules_directory = self.root_directory / \"rules\"\n self.util_directory = self.root_directory / \"util\"\n self.regexp_assemble_directory = self.util_directory / \"regexp-assemble\"\n- self.data_files_directory = self.regexp_assemble_directory / \"data\"\n- self.include_files_directory = self.regexp_assemble_directory / \"data\" / \"include\"\n+ self.data_files_directory = self.root_directory / \"data\"\n+ self.include_files_directory = self.root_directory / \"data\" / \"include\"\n self.regexp_assemble_pl_path = self.regexp_assemble_directory / \"lib\" / \"regexp-assemble.pl\"\n self.single_rule_id = namespace.rule_id if namespace else None\n self.single_chain_offset = None\n", "issue": "Move data files from util/regexp-assemble directory to the top level\n### Description\r\n\r\nData files used to generate regular expressions have been somehow in a difficult-to-find place, dependent on the tool.\r\n\r\nNow with the new crs-toolchain, this is not needed anymore.\r\n\r\nSo let's move the data files to the top level directory.\r\n\r\n### Requirements\r\n\r\n- move all data files to the top level dir\r\n- review dependencies and check that all references are updated\n", "before_files": [{"content": "import argparse\nfrom pathlib import Path\nimport logging\n\n\n\nclass Context(object):\n def __init__(self, root_directory: Path, namespace: argparse.Namespace=None):\n self.root_directory = root_directory\n self.rules_directory = self.root_directory / \"rules\"\n self.util_directory = self.root_directory / \"util\"\n self.regexp_assemble_directory = self.util_directory / \"regexp-assemble\"\n self.data_files_directory = self.regexp_assemble_directory / \"data\"\n self.include_files_directory = self.regexp_assemble_directory / \"data\" / \"include\"\n self.regexp_assemble_pl_path = self.regexp_assemble_directory / \"lib\" / \"regexp-assemble.pl\"\n self.single_rule_id = namespace.rule_id if namespace else None\n self.single_chain_offset = None\n if namespace and \"chain_offset\" in namespace:\n self.single_chain_offset = namespace.chain_offset\n\n self._dump_to_debug_log()\n\n assert (\n self.rules_directory.exists()\n and self.util_directory.exists()\n and self.regexp_assemble_directory.exists()\n and self.data_files_directory.exists()\n and self.include_files_directory.exists()\n )\n\n\n def _dump_to_debug_log(self):\n logger = logging.getLogger()\n logger.debug(\"Root directory: %s\", self.root_directory)\n logger.debug(\"Rules directory: %s\", self.rules_directory)\n logger.debug(\"Data files directory: %s\", self.data_files_directory)\n logger.debug(\"Include files directory: %s\", self.include_files_directory)\n logger.debug(\"Parsed rule ID: %s\", self.single_rule_id)\n logger.debug(\"Parsed chain offset: %s\", self.single_chain_offset)\n", "path": "util/regexp-assemble/lib/context.py"}], "after_files": [{"content": "import argparse\nfrom pathlib import Path\nimport logging\n\n\n\nclass Context(object):\n def __init__(self, root_directory: Path, namespace: argparse.Namespace=None):\n self.root_directory = root_directory\n self.rules_directory = self.root_directory / \"rules\"\n self.util_directory = self.root_directory / \"util\"\n self.regexp_assemble_directory = self.util_directory / \"regexp-assemble\"\n self.data_files_directory = self.root_directory / \"data\"\n 
self.include_files_directory = self.root_directory / \"data\" / \"include\"\n self.regexp_assemble_pl_path = self.regexp_assemble_directory / \"lib\" / \"regexp-assemble.pl\"\n self.single_rule_id = namespace.rule_id if namespace else None\n self.single_chain_offset = None\n if namespace and \"chain_offset\" in namespace:\n self.single_chain_offset = namespace.chain_offset\n\n self._dump_to_debug_log()\n\n assert (\n self.rules_directory.exists()\n and self.util_directory.exists()\n and self.regexp_assemble_directory.exists()\n and self.data_files_directory.exists()\n and self.include_files_directory.exists()\n )\n\n\n def _dump_to_debug_log(self):\n logger = logging.getLogger()\n logger.debug(\"Root directory: %s\", self.root_directory)\n logger.debug(\"Rules directory: %s\", self.rules_directory)\n logger.debug(\"Data files directory: %s\", self.data_files_directory)\n logger.debug(\"Include files directory: %s\", self.include_files_directory)\n logger.debug(\"Parsed rule ID: %s\", self.single_rule_id)\n logger.debug(\"Parsed chain offset: %s\", self.single_chain_offset)\n", "path": "util/regexp-assemble/lib/context.py"}]}
| 779 | 215 |
gh_patches_debug_4420
|
rasdani/github-patches
|
git_diff
|
ephios-dev__ephios-220
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List of own upcoming shifts
As a user, I want to see a list of shifts that I have been confirmed for on the main page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/event_management/templatetags/event_extras.py`
Content:
```
1 from django import template
2 from django.utils.safestring import mark_safe
3
4 from ephios.event_management.models import AbstractParticipation
5
6 register = template.Library()
7
8
9 @register.filter(name="shift_status")
10 def shift_status(shift, user):
11 participation = user.as_participant().participation_for(shift)
12 if participation is not None:
13 color = {
14 AbstractParticipation.States.USER_DECLINED: "text-danger",
15 AbstractParticipation.States.RESPONSIBLE_REJECTED: "text-danger",
16 AbstractParticipation.States.REQUESTED: "text-warning",
17 AbstractParticipation.States.CONFIRMED: "text-success",
18 }[participation.state]
19 return mark_safe(f'<span class="{color}">{participation.get_state_display()}</span><br>')
20 return ""
21
22
23 @register.filter(name="can_sign_up")
24 def can_sign_up(shift, user):
25 return shift.signup_method.can_sign_up(user.as_participant())
26
27
28 @register.filter(name="render_shift_state")
29 def render_shift_state(shift, request):
30 return shift.signup_method.render_shift_state(request)
31
32
33 @register.filter(name="signup_errors")
34 def signup_errors(shift, user):
35 return shift.signup_method.get_signup_errors(user.as_participant())
36
37
38 @register.filter(name="can_decline")
39 def can_decline(shift, user):
40 return shift.signup_method.can_decline(user.as_participant())
41
42
43 @register.filter(name="decline_errors")
44 def decline_errors(shift, user):
45 return shift.signup_method.get_decline_errors(user.as_participant())
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py
--- a/ephios/event_management/templatetags/event_extras.py
+++ b/ephios/event_management/templatetags/event_extras.py
@@ -43,3 +43,10 @@
@register.filter(name="decline_errors")
def decline_errors(shift, user):
return shift.signup_method.get_decline_errors(user.as_participant())
+
+
[email protected](name="confirmed_shifts")
+def confirmed_shifts(user):
+ return user.get_shifts(
+ with_participation_state_in=[AbstractParticipation.States.CONFIRMED]
+ ).order_by("start_time")
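A hedged sketch of how the new filter's queryset could be exercised from Python; the helper name is hypothetical, and the equivalent template usage appears only in a comment:

```python
# In a template the filter would be applied roughly as:
#   {% for shift in user|confirmed_shifts %} ... {% endfor %}
from ephios.event_management.models import AbstractParticipation


def upcoming_confirmed_shifts(user):
    # same queryset the filter returns: shifts with a CONFIRMED
    # participation for this user, ordered soonest first
    return user.get_shifts(
        with_participation_state_in=[AbstractParticipation.States.CONFIRMED]
    ).order_by("start_time")
```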
|
{"golden_diff": "diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py\n--- a/ephios/event_management/templatetags/event_extras.py\n+++ b/ephios/event_management/templatetags/event_extras.py\n@@ -43,3 +43,10 @@\n @register.filter(name=\"decline_errors\")\n def decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n+\n+\[email protected](name=\"confirmed_shifts\")\n+def confirmed_shifts(user):\n+ return user.get_shifts(\n+ with_participation_state_in=[AbstractParticipation.States.CONFIRMED]\n+ ).order_by(\"start_time\")\n", "issue": "List of own upcoming shifts\nAs a user, I want to see a list of shifts that I have been confirmed for on the main page.\n", "before_files": [{"content": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom ephios.event_management.models import AbstractParticipation\n\nregister = template.Library()\n\n\[email protected](name=\"shift_status\")\ndef shift_status(shift, user):\n participation = user.as_participant().participation_for(shift)\n if participation is not None:\n color = {\n AbstractParticipation.States.USER_DECLINED: \"text-danger\",\n AbstractParticipation.States.RESPONSIBLE_REJECTED: \"text-danger\",\n AbstractParticipation.States.REQUESTED: \"text-warning\",\n AbstractParticipation.States.CONFIRMED: \"text-success\",\n }[participation.state]\n return mark_safe(f'<span class=\"{color}\">{participation.get_state_display()}</span><br>')\n return \"\"\n\n\[email protected](name=\"can_sign_up\")\ndef can_sign_up(shift, user):\n return shift.signup_method.can_sign_up(user.as_participant())\n\n\[email protected](name=\"render_shift_state\")\ndef render_shift_state(shift, request):\n return shift.signup_method.render_shift_state(request)\n\n\[email protected](name=\"signup_errors\")\ndef signup_errors(shift, user):\n return shift.signup_method.get_signup_errors(user.as_participant())\n\n\[email protected](name=\"can_decline\")\ndef can_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n\n\[email protected](name=\"decline_errors\")\ndef decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n", "path": "ephios/event_management/templatetags/event_extras.py"}], "after_files": [{"content": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom ephios.event_management.models import AbstractParticipation\n\nregister = template.Library()\n\n\[email protected](name=\"shift_status\")\ndef shift_status(shift, user):\n participation = user.as_participant().participation_for(shift)\n if participation is not None:\n color = {\n AbstractParticipation.States.USER_DECLINED: \"text-danger\",\n AbstractParticipation.States.RESPONSIBLE_REJECTED: \"text-danger\",\n AbstractParticipation.States.REQUESTED: \"text-warning\",\n AbstractParticipation.States.CONFIRMED: \"text-success\",\n }[participation.state]\n return mark_safe(f'<span class=\"{color}\">{participation.get_state_display()}</span><br>')\n return \"\"\n\n\[email protected](name=\"can_sign_up\")\ndef can_sign_up(shift, user):\n return shift.signup_method.can_sign_up(user.as_participant())\n\n\[email protected](name=\"render_shift_state\")\ndef render_shift_state(shift, request):\n return shift.signup_method.render_shift_state(request)\n\n\[email protected](name=\"signup_errors\")\ndef signup_errors(shift, user):\n return 
shift.signup_method.get_signup_errors(user.as_participant())\n\n\[email protected](name=\"can_decline\")\ndef can_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n\n\[email protected](name=\"decline_errors\")\ndef decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n\n\[email protected](name=\"confirmed_shifts\")\ndef confirmed_shifts(user):\n return user.get_shifts(\n with_participation_state_in=[AbstractParticipation.States.CONFIRMED]\n ).order_by(\"start_time\")\n", "path": "ephios/event_management/templatetags/event_extras.py"}]}
| 700 | 165 |
gh_patches_debug_15929
|
rasdani/github-patches
|
git_diff
|
microsoft__DeepSpeed-4405
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[REQUEST] Add timeout as entry-point option or environment variable
**Is your feature request related to a problem? Please describe.**
I am using Hugging Face `transformers` for my deep learning, and it has a nice option to restrict specific processing to the main process only. This is useful if a function caches the result: the main process does the processing while the other processes wait, and when main is done, the other processes can just load from the cache. That's pretty neat.
The problem arises when these are long running processes. In distributed environment (torch or deepspeed, for instance), the communication between processes has a default timeout. If no communication has occurred for `timeout` seconds, the whole program will exit.
**Describe the solution you'd like**
Both [`torch`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) and [`deepspeed`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) provide options in the Python init methods to set the timeout parameter to a higher value than the default 30 minutes, but this option is not available from the command-line or through an environment, which is what I would like.
**Describe alternatives you've considered**
I could make a custom fork but I think that this is something that more people might need as soon as they scale to larger projects.
**Additional context**
I can work on this, depending on what you suggest as a solution (CLI argument for the `deepspeed` command or as environment variable).
--- END ISSUE ---
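The main-process-only caching behaviour described in the issue is commonly built from barriers; below is a minimal sketch of that pattern (not Hugging Face's actual implementation) in which every rank except 0 waits while rank 0 fills the cache, which is exactly where a short process-group timeout bites:

```python
from contextlib import contextmanager

import torch.distributed as dist


@contextmanager
def main_process_first():
    # non-main ranks block here until rank 0 finishes the guarded section;
    # with long preprocessing, this wait is where the default 30-minute
    # process-group timeout can fire
    if dist.get_rank() != 0:
        dist.barrier()
    try:
        yield
    finally:
        # rank 0 arrives last and releases the waiting ranks
        if dist.get_rank() == 0:
            dist.barrier()
```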
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/constants.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 from datetime import timedelta
7
8 #############################################
9 # Torch distributed constants
10 #############################################
11 TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
12
13 # Default process group wide timeout, if applicable.
14 # This only applies to the gloo and nccl backends
15 # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
16 # To make an attempt at backwards compatibility with THD, we use an
17 # extraordinarily high default timeout, given that THD did not have timeouts.
18 default_pg_timeout = timedelta(minutes=30)
19 INFERENCE_GENERIC_MODE = 'generic'
20 INFERENCE_SPECIALIZED_MODE = 'specialized'
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepspeed/constants.py b/deepspeed/constants.py
--- a/deepspeed/constants.py
+++ b/deepspeed/constants.py
@@ -3,6 +3,7 @@
# DeepSpeed Team
+import os
from datetime import timedelta
#############################################
@@ -15,6 +16,6 @@
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
-default_pg_timeout = timedelta(minutes=30)
+default_pg_timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30)))
INFERENCE_GENERIC_MODE = 'generic'
INFERENCE_SPECIALIZED_MODE = 'specialized'
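A hedged usage sketch of the resulting knob: the variable is read once at import time, so it must be set before launch; the script name is a placeholder, and the manual `init_process_group` call simply shows the same `timedelta` being honoured by torch:

```python
# Shell (minutes, hypothetical script): DEEPSPEED_TIMEOUT=120 deepspeed train.py
import os
from datetime import timedelta

import torch.distributed as dist

timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", "30")))

# assumes MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE are provided by the launcher
dist.init_process_group(backend="gloo", timeout=timeout)
```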
|
{"golden_diff": "diff --git a/deepspeed/constants.py b/deepspeed/constants.py\n--- a/deepspeed/constants.py\n+++ b/deepspeed/constants.py\n@@ -3,6 +3,7 @@\n \n # DeepSpeed Team\n \n+import os\n from datetime import timedelta\n \n #############################################\n@@ -15,6 +16,6 @@\n # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).\n # To make an attempt at backwards compatibility with THD, we use an\n # extraordinarily high default timeout, given that THD did not have timeouts.\n-default_pg_timeout = timedelta(minutes=30)\n+default_pg_timeout = timedelta(minutes=int(os.getenv(\"DEEPSPEED_TIMEOUT\", default=30)))\n INFERENCE_GENERIC_MODE = 'generic'\n INFERENCE_SPECIALIZED_MODE = 'specialized'\n", "issue": "[REQUEST] Add timeout as entry-point option or environment variable\n**Is your feature request related to a problem? Please describe.**\r\nI am using Hugging Face `transformers` for my deep learning, and it has a nice option to restrict specific processing to the main process only. This is useful if a function caches the result: the main process does the processing while the other processes wait, and when main is done, the other processes can just load from the cache. That's pretty neat.\r\n\r\nThe problem arises when these are long running processes. In distributed environment (torch or deepspeed, for instance), the communication between processes has a default timeout. If no communication has occurred for `timeout` seconds, the whole program will exit. \r\n\r\n**Describe the solution you'd like**\r\n\r\nBoth [`torch`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) and [`deepspeed`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) provide options in the Python init methods to set the timeout parameter to a higher value than the default 30 minutes, but this option is not available from the command-line or through an environment, which is what I would like.\r\n\r\n**Describe alternatives you've considered**\r\nI could make a custom fork but I think that this is something that more people might need as soon as they scale to larger projects.\r\n\r\n**Additional context**\r\n\r\nI can work on this, depending on what you suggest as a solution (CLI argument for the `deepspeed` command or as environment variable).\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom datetime import timedelta\n\n#############################################\n# Torch distributed constants\n#############################################\nTORCH_DISTRIBUTED_DEFAULT_PORT = 29500\n\n# Default process group wide timeout, if applicable.\n# This only applies to the gloo and nccl backends\n# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).\n# To make an attempt at backwards compatibility with THD, we use an\n# extraordinarily high default timeout, given that THD did not have timeouts.\ndefault_pg_timeout = timedelta(minutes=30)\nINFERENCE_GENERIC_MODE = 'generic'\nINFERENCE_SPECIALIZED_MODE = 'specialized'\n", "path": "deepspeed/constants.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nfrom datetime import timedelta\n\n#############################################\n# Torch distributed constants\n#############################################\nTORCH_DISTRIBUTED_DEFAULT_PORT = 29500\n\n# Default process 
group wide timeout, if applicable.\n# This only applies to the gloo and nccl backends\n# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).\n# To make an attempt at backwards compatibility with THD, we use an\n# extraordinarily high default timeout, given that THD did not have timeouts.\ndefault_pg_timeout = timedelta(minutes=int(os.getenv(\"DEEPSPEED_TIMEOUT\", default=30)))\nINFERENCE_GENERIC_MODE = 'generic'\nINFERENCE_SPECIALIZED_MODE = 'specialized'\n", "path": "deepspeed/constants.py"}]}
| 778 | 174 |
gh_patches_debug_6053
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-3123
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double import
I noticed that in `networkx/algorithms/__init__.py`the statement `from networkx.algorithms.triads import *` occurs twice. Is there any reason for this or is this just a blunder?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/__init__.py`
Content:
```
1 from networkx.algorithms.assortativity import *
2 from networkx.algorithms.boundary import *
3 from networkx.algorithms.bridges import *
4 from networkx.algorithms.chains import *
5 from networkx.algorithms.centrality import *
6 from networkx.algorithms.chordal import *
7 from networkx.algorithms.cluster import *
8 from networkx.algorithms.clique import *
9 from networkx.algorithms.communicability_alg import *
10 from networkx.algorithms.components import *
11 from networkx.algorithms.coloring import *
12 from networkx.algorithms.core import *
13 from networkx.algorithms.covering import *
14 from networkx.algorithms.cycles import *
15 from networkx.algorithms.cuts import *
16 from networkx.algorithms.dag import *
17 from networkx.algorithms.distance_measures import *
18 from networkx.algorithms.distance_regular import *
19 from networkx.algorithms.dominance import *
20 from networkx.algorithms.dominating import *
21 from networkx.algorithms.efficiency import *
22 from networkx.algorithms.euler import *
23 from networkx.algorithms.graphical import *
24 from networkx.algorithms.hierarchy import *
25 from networkx.algorithms.hybrid import *
26 from networkx.algorithms.link_analysis import *
27 from networkx.algorithms.link_prediction import *
28 from networkx.algorithms.lowest_common_ancestors import *
29 from networkx.algorithms.isolate import *
30 from networkx.algorithms.matching import *
31 from networkx.algorithms.minors import *
32 from networkx.algorithms.mis import *
33 from networkx.algorithms.operators import *
34 from networkx.algorithms.planarity import *
35 from networkx.algorithms.reciprocity import *
36 from networkx.algorithms.richclub import *
37 from networkx.algorithms.shortest_paths import *
38 from networkx.algorithms.similarity import *
39 from networkx.algorithms.simple_paths import *
40 from networkx.algorithms.smallworld import *
41 from networkx.algorithms.smetric import *
42 from networkx.algorithms.structuralholes import *
43 from networkx.algorithms.triads import *
44 from networkx.algorithms.sparsifiers import *
45 from networkx.algorithms.swap import *
46 from networkx.algorithms.traversal import *
47 from networkx.algorithms.triads import *
48 from networkx.algorithms.vitality import *
49 from networkx.algorithms.voronoi import *
50 from networkx.algorithms.wiener import *
51
52 # Make certain subpackages available to the user as direct imports from
53 # the `networkx` namespace.
54 import networkx.algorithms.assortativity
55 import networkx.algorithms.bipartite
56 import networkx.algorithms.node_classification
57 import networkx.algorithms.centrality
58 import networkx.algorithms.chordal
59 import networkx.algorithms.cluster
60 import networkx.algorithms.clique
61 import networkx.algorithms.components
62 import networkx.algorithms.connectivity
63 import networkx.algorithms.community
64 import networkx.algorithms.coloring
65 import networkx.algorithms.flow
66 import networkx.algorithms.isomorphism
67 import networkx.algorithms.link_analysis
68 import networkx.algorithms.lowest_common_ancestors
69 import networkx.algorithms.operators
70 import networkx.algorithms.shortest_paths
71 import networkx.algorithms.tournament
72 import networkx.algorithms.traversal
73 import networkx.algorithms.tree
74
75 # Make certain functions from some of the previous subpackages available
76 # to the user as direct imports from the `networkx` namespace.
77 from networkx.algorithms.bipartite import complete_bipartite_graph
78 from networkx.algorithms.bipartite import is_bipartite
79 from networkx.algorithms.bipartite import project
80 from networkx.algorithms.bipartite import projected_graph
81 from networkx.algorithms.connectivity import all_pairs_node_connectivity
82 from networkx.algorithms.connectivity import all_node_cuts
83 from networkx.algorithms.connectivity import average_node_connectivity
84 from networkx.algorithms.connectivity import edge_connectivity
85 from networkx.algorithms.connectivity import edge_disjoint_paths
86 from networkx.algorithms.connectivity import k_components
87 from networkx.algorithms.connectivity import k_edge_components
88 from networkx.algorithms.connectivity import k_edge_subgraphs
89 from networkx.algorithms.connectivity import k_edge_augmentation
90 from networkx.algorithms.connectivity import is_k_edge_connected
91 from networkx.algorithms.connectivity import minimum_edge_cut
92 from networkx.algorithms.connectivity import minimum_node_cut
93 from networkx.algorithms.connectivity import node_connectivity
94 from networkx.algorithms.connectivity import node_disjoint_paths
95 from networkx.algorithms.connectivity import stoer_wagner
96 from networkx.algorithms.flow import capacity_scaling
97 from networkx.algorithms.flow import cost_of_flow
98 from networkx.algorithms.flow import gomory_hu_tree
99 from networkx.algorithms.flow import max_flow_min_cost
100 from networkx.algorithms.flow import maximum_flow
101 from networkx.algorithms.flow import maximum_flow_value
102 from networkx.algorithms.flow import min_cost_flow
103 from networkx.algorithms.flow import min_cost_flow_cost
104 from networkx.algorithms.flow import minimum_cut
105 from networkx.algorithms.flow import minimum_cut_value
106 from networkx.algorithms.flow import network_simplex
107 from networkx.algorithms.isomorphism import could_be_isomorphic
108 from networkx.algorithms.isomorphism import fast_could_be_isomorphic
109 from networkx.algorithms.isomorphism import faster_could_be_isomorphic
110 from networkx.algorithms.isomorphism import is_isomorphic
111 from networkx.algorithms.tree.branchings import maximum_branching
112 from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
113 from networkx.algorithms.tree.branchings import minimum_branching
114 from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
115 from networkx.algorithms.tree.coding import *
116 from networkx.algorithms.tree.operations import *
117 from networkx.algorithms.tree.recognition import *
118 from networkx.algorithms.tree.mst import *
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/algorithms/__init__.py b/networkx/algorithms/__init__.py
--- a/networkx/algorithms/__init__.py
+++ b/networkx/algorithms/__init__.py
@@ -40,7 +40,6 @@
from networkx.algorithms.smallworld import *
from networkx.algorithms.smetric import *
from networkx.algorithms.structuralholes import *
-from networkx.algorithms.triads import *
from networkx.algorithms.sparsifiers import *
from networkx.algorithms.swap import *
from networkx.algorithms.traversal import *
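Dropping the duplicate line is behaviour-preserving: a repeated `import *` merely re-binds the same names. A quick sketch confirming the triads API remains available with a single import:

```python
import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
# triadic_census is re-exported from networkx.algorithms.triads; one
# wildcard import of that module is enough to expose it as nx.triadic_census
print(nx.triadic_census(G))
```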
|
{"golden_diff": "diff --git a/networkx/algorithms/__init__.py b/networkx/algorithms/__init__.py\n--- a/networkx/algorithms/__init__.py\n+++ b/networkx/algorithms/__init__.py\n@@ -40,7 +40,6 @@\n from networkx.algorithms.smallworld import *\n from networkx.algorithms.smetric import *\n from networkx.algorithms.structuralholes import *\n-from networkx.algorithms.triads import *\n from networkx.algorithms.sparsifiers import *\n from networkx.algorithms.swap import *\n from networkx.algorithms.traversal import *\n", "issue": "Double import\nI noticed that in `networkx/algorithms/__init__.py`the statement `from networkx.algorithms.triads import *` occurs twice. Is there any reason for this or is this just a blunder?\n", "before_files": [{"content": "from networkx.algorithms.assortativity import *\nfrom networkx.algorithms.boundary import *\nfrom networkx.algorithms.bridges import *\nfrom networkx.algorithms.chains import *\nfrom networkx.algorithms.centrality import *\nfrom networkx.algorithms.chordal import *\nfrom networkx.algorithms.cluster import *\nfrom networkx.algorithms.clique import *\nfrom networkx.algorithms.communicability_alg import *\nfrom networkx.algorithms.components import *\nfrom networkx.algorithms.coloring import *\nfrom networkx.algorithms.core import *\nfrom networkx.algorithms.covering import *\nfrom networkx.algorithms.cycles import *\nfrom networkx.algorithms.cuts import *\nfrom networkx.algorithms.dag import *\nfrom networkx.algorithms.distance_measures import *\nfrom networkx.algorithms.distance_regular import *\nfrom networkx.algorithms.dominance import *\nfrom networkx.algorithms.dominating import *\nfrom networkx.algorithms.efficiency import *\nfrom networkx.algorithms.euler import *\nfrom networkx.algorithms.graphical import *\nfrom networkx.algorithms.hierarchy import *\nfrom networkx.algorithms.hybrid import *\nfrom networkx.algorithms.link_analysis import *\nfrom networkx.algorithms.link_prediction import *\nfrom networkx.algorithms.lowest_common_ancestors import *\nfrom networkx.algorithms.isolate import *\nfrom networkx.algorithms.matching import *\nfrom networkx.algorithms.minors import *\nfrom networkx.algorithms.mis import *\nfrom networkx.algorithms.operators import *\nfrom networkx.algorithms.planarity import *\nfrom networkx.algorithms.reciprocity import *\nfrom networkx.algorithms.richclub import *\nfrom networkx.algorithms.shortest_paths import *\nfrom networkx.algorithms.similarity import *\nfrom networkx.algorithms.simple_paths import *\nfrom networkx.algorithms.smallworld import *\nfrom networkx.algorithms.smetric import *\nfrom networkx.algorithms.structuralholes import *\nfrom networkx.algorithms.triads import *\nfrom networkx.algorithms.sparsifiers import *\nfrom networkx.algorithms.swap import *\nfrom networkx.algorithms.traversal import *\nfrom networkx.algorithms.triads import *\nfrom networkx.algorithms.vitality import *\nfrom networkx.algorithms.voronoi import *\nfrom networkx.algorithms.wiener import *\n\n# Make certain subpackages available to the user as direct imports from\n# the `networkx` namespace.\nimport networkx.algorithms.assortativity\nimport networkx.algorithms.bipartite\nimport networkx.algorithms.node_classification\nimport networkx.algorithms.centrality\nimport networkx.algorithms.chordal\nimport networkx.algorithms.cluster\nimport networkx.algorithms.clique\nimport networkx.algorithms.components\nimport networkx.algorithms.connectivity\nimport networkx.algorithms.community\nimport networkx.algorithms.coloring\nimport 
networkx.algorithms.flow\nimport networkx.algorithms.isomorphism\nimport networkx.algorithms.link_analysis\nimport networkx.algorithms.lowest_common_ancestors\nimport networkx.algorithms.operators\nimport networkx.algorithms.shortest_paths\nimport networkx.algorithms.tournament\nimport networkx.algorithms.traversal\nimport networkx.algorithms.tree\n\n# Make certain functions from some of the previous subpackages available\n# to the user as direct imports from the `networkx` namespace.\nfrom networkx.algorithms.bipartite import complete_bipartite_graph\nfrom networkx.algorithms.bipartite import is_bipartite\nfrom networkx.algorithms.bipartite import project\nfrom networkx.algorithms.bipartite import projected_graph\nfrom networkx.algorithms.connectivity import all_pairs_node_connectivity\nfrom networkx.algorithms.connectivity import all_node_cuts\nfrom networkx.algorithms.connectivity import average_node_connectivity\nfrom networkx.algorithms.connectivity import edge_connectivity\nfrom networkx.algorithms.connectivity import edge_disjoint_paths\nfrom networkx.algorithms.connectivity import k_components\nfrom networkx.algorithms.connectivity import k_edge_components\nfrom networkx.algorithms.connectivity import k_edge_subgraphs\nfrom networkx.algorithms.connectivity import k_edge_augmentation\nfrom networkx.algorithms.connectivity import is_k_edge_connected\nfrom networkx.algorithms.connectivity import minimum_edge_cut\nfrom networkx.algorithms.connectivity import minimum_node_cut\nfrom networkx.algorithms.connectivity import node_connectivity\nfrom networkx.algorithms.connectivity import node_disjoint_paths\nfrom networkx.algorithms.connectivity import stoer_wagner\nfrom networkx.algorithms.flow import capacity_scaling\nfrom networkx.algorithms.flow import cost_of_flow\nfrom networkx.algorithms.flow import gomory_hu_tree\nfrom networkx.algorithms.flow import max_flow_min_cost\nfrom networkx.algorithms.flow import maximum_flow\nfrom networkx.algorithms.flow import maximum_flow_value\nfrom networkx.algorithms.flow import min_cost_flow\nfrom networkx.algorithms.flow import min_cost_flow_cost\nfrom networkx.algorithms.flow import minimum_cut\nfrom networkx.algorithms.flow import minimum_cut_value\nfrom networkx.algorithms.flow import network_simplex\nfrom networkx.algorithms.isomorphism import could_be_isomorphic\nfrom networkx.algorithms.isomorphism import fast_could_be_isomorphic\nfrom networkx.algorithms.isomorphism import faster_could_be_isomorphic\nfrom networkx.algorithms.isomorphism import is_isomorphic\nfrom networkx.algorithms.tree.branchings import maximum_branching\nfrom networkx.algorithms.tree.branchings import maximum_spanning_arborescence\nfrom networkx.algorithms.tree.branchings import minimum_branching\nfrom networkx.algorithms.tree.branchings import minimum_spanning_arborescence\nfrom networkx.algorithms.tree.coding import *\nfrom networkx.algorithms.tree.operations import *\nfrom networkx.algorithms.tree.recognition import *\nfrom networkx.algorithms.tree.mst import *\n", "path": "networkx/algorithms/__init__.py"}], "after_files": [{"content": "from networkx.algorithms.assortativity import *\nfrom networkx.algorithms.boundary import *\nfrom networkx.algorithms.bridges import *\nfrom networkx.algorithms.chains import *\nfrom networkx.algorithms.centrality import *\nfrom networkx.algorithms.chordal import *\nfrom networkx.algorithms.cluster import *\nfrom networkx.algorithms.clique import *\nfrom networkx.algorithms.communicability_alg import *\nfrom 
networkx.algorithms.components import *\nfrom networkx.algorithms.coloring import *\nfrom networkx.algorithms.core import *\nfrom networkx.algorithms.covering import *\nfrom networkx.algorithms.cycles import *\nfrom networkx.algorithms.cuts import *\nfrom networkx.algorithms.dag import *\nfrom networkx.algorithms.distance_measures import *\nfrom networkx.algorithms.distance_regular import *\nfrom networkx.algorithms.dominance import *\nfrom networkx.algorithms.dominating import *\nfrom networkx.algorithms.efficiency import *\nfrom networkx.algorithms.euler import *\nfrom networkx.algorithms.graphical import *\nfrom networkx.algorithms.hierarchy import *\nfrom networkx.algorithms.hybrid import *\nfrom networkx.algorithms.link_analysis import *\nfrom networkx.algorithms.link_prediction import *\nfrom networkx.algorithms.lowest_common_ancestors import *\nfrom networkx.algorithms.isolate import *\nfrom networkx.algorithms.matching import *\nfrom networkx.algorithms.minors import *\nfrom networkx.algorithms.mis import *\nfrom networkx.algorithms.operators import *\nfrom networkx.algorithms.planarity import *\nfrom networkx.algorithms.reciprocity import *\nfrom networkx.algorithms.richclub import *\nfrom networkx.algorithms.shortest_paths import *\nfrom networkx.algorithms.similarity import *\nfrom networkx.algorithms.simple_paths import *\nfrom networkx.algorithms.smallworld import *\nfrom networkx.algorithms.smetric import *\nfrom networkx.algorithms.structuralholes import *\nfrom networkx.algorithms.sparsifiers import *\nfrom networkx.algorithms.swap import *\nfrom networkx.algorithms.traversal import *\nfrom networkx.algorithms.triads import *\nfrom networkx.algorithms.vitality import *\nfrom networkx.algorithms.voronoi import *\nfrom networkx.algorithms.wiener import *\n\n# Make certain subpackages available to the user as direct imports from\n# the `networkx` namespace.\nimport networkx.algorithms.assortativity\nimport networkx.algorithms.bipartite\nimport networkx.algorithms.node_classification\nimport networkx.algorithms.centrality\nimport networkx.algorithms.chordal\nimport networkx.algorithms.cluster\nimport networkx.algorithms.clique\nimport networkx.algorithms.components\nimport networkx.algorithms.connectivity\nimport networkx.algorithms.community\nimport networkx.algorithms.coloring\nimport networkx.algorithms.flow\nimport networkx.algorithms.isomorphism\nimport networkx.algorithms.link_analysis\nimport networkx.algorithms.lowest_common_ancestors\nimport networkx.algorithms.operators\nimport networkx.algorithms.shortest_paths\nimport networkx.algorithms.tournament\nimport networkx.algorithms.traversal\nimport networkx.algorithms.tree\n\n# Make certain functions from some of the previous subpackages available\n# to the user as direct imports from the `networkx` namespace.\nfrom networkx.algorithms.bipartite import complete_bipartite_graph\nfrom networkx.algorithms.bipartite import is_bipartite\nfrom networkx.algorithms.bipartite import project\nfrom networkx.algorithms.bipartite import projected_graph\nfrom networkx.algorithms.connectivity import all_pairs_node_connectivity\nfrom networkx.algorithms.connectivity import all_node_cuts\nfrom networkx.algorithms.connectivity import average_node_connectivity\nfrom networkx.algorithms.connectivity import edge_connectivity\nfrom networkx.algorithms.connectivity import edge_disjoint_paths\nfrom networkx.algorithms.connectivity import k_components\nfrom networkx.algorithms.connectivity import k_edge_components\nfrom 
networkx.algorithms.connectivity import k_edge_subgraphs\nfrom networkx.algorithms.connectivity import k_edge_augmentation\nfrom networkx.algorithms.connectivity import is_k_edge_connected\nfrom networkx.algorithms.connectivity import minimum_edge_cut\nfrom networkx.algorithms.connectivity import minimum_node_cut\nfrom networkx.algorithms.connectivity import node_connectivity\nfrom networkx.algorithms.connectivity import node_disjoint_paths\nfrom networkx.algorithms.connectivity import stoer_wagner\nfrom networkx.algorithms.flow import capacity_scaling\nfrom networkx.algorithms.flow import cost_of_flow\nfrom networkx.algorithms.flow import gomory_hu_tree\nfrom networkx.algorithms.flow import max_flow_min_cost\nfrom networkx.algorithms.flow import maximum_flow\nfrom networkx.algorithms.flow import maximum_flow_value\nfrom networkx.algorithms.flow import min_cost_flow\nfrom networkx.algorithms.flow import min_cost_flow_cost\nfrom networkx.algorithms.flow import minimum_cut\nfrom networkx.algorithms.flow import minimum_cut_value\nfrom networkx.algorithms.flow import network_simplex\nfrom networkx.algorithms.isomorphism import could_be_isomorphic\nfrom networkx.algorithms.isomorphism import fast_could_be_isomorphic\nfrom networkx.algorithms.isomorphism import faster_could_be_isomorphic\nfrom networkx.algorithms.isomorphism import is_isomorphic\nfrom networkx.algorithms.tree.branchings import maximum_branching\nfrom networkx.algorithms.tree.branchings import maximum_spanning_arborescence\nfrom networkx.algorithms.tree.branchings import minimum_branching\nfrom networkx.algorithms.tree.branchings import minimum_spanning_arborescence\nfrom networkx.algorithms.tree.coding import *\nfrom networkx.algorithms.tree.operations import *\nfrom networkx.algorithms.tree.recognition import *\nfrom networkx.algorithms.tree.mst import *\n", "path": "networkx/algorithms/__init__.py"}]}
| 1,778 | 121 |
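An editorial aside on the row above: the duplicate `from networkx.algorithms.triads import *` is harmless at runtime, because Python caches modules in `sys.modules` and the second import is a cached no-op, but it is dead weight worth removing. Below is a minimal standalone sketch for flagging repeated top-level import statements — the file path is taken from this row and assumed to exist locally:

```python
from collections import Counter
from pathlib import Path

# Count verbatim import statements in the module patched in this row.
source = Path("networkx/algorithms/__init__.py").read_text()
imports = [
    line.strip()
    for line in source.splitlines()
    if line.strip().startswith(("import ", "from "))
]
for statement, count in Counter(imports).items():
    if count > 1:
        print(f"{count}x duplicated: {statement}")
```

Linters can catch the same class of problem automatically (pylint's `reimported` check, for instance), though wildcard imports defeat some of them.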
gh_patches_debug_28442
|
rasdani/github-patches
|
git_diff
|
pypa__pipenv-1326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pipenv starts slow when IPython is installed.
IPython is imported when importing dotenv.
(ref: theskumar/python-dotenv#84 and [import profile](https://paste.ubuntu.com/26409167/))
Since pipenv uses a patched version of dotenv, pipenv should port the upstream fix
or patch `dotenv/__init__.py` to stop importing `dotenv.ipython`.
##### Describe your environment
1. Ubuntu 17.10
1. Python version: 3.7.0a4
1. Pipenv version: 9.0.3
##### Steps to replicate
* Install Python 3.7.0a4 or newer
* ` PYTHONPROFILEIMPORTTIME=1 path/to/pipenv --version 2>pipenv-version`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/patched/dotenv/__init__.py`
Content:
```
1 from .cli import get_cli_string
2 from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv
3 try:
4 from .ipython import load_ipython_extension
5 except ImportError:
6 pass
7
8 __all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']
9
```
Path: `pipenv/patched/dotenv/ipython.py`
Content:
```
1 from __future__ import print_function
2 from .main import load_dotenv, find_dotenv
3
4 from IPython.core.magic import Magics, magics_class, line_magic
5 from IPython.core.magic_arguments import (argument, magic_arguments,
6 parse_argstring)
7
8
9 @magics_class
10 class IPythonDotEnv(Magics):
11
12 @magic_arguments()
13 @argument(
14 '-o', '--override', action='store_true',
15 help="Indicate to override existing variables"
16 )
17 @argument(
18 '-v', '--verbose', action='store_true',
19 help="Indicate function calls to be verbose"
20 )
21 @argument('dotenv_path', nargs='?', type=str, default='.env',
22 help='Search in increasingly higher folders for the `dotenv_path`')
23 @line_magic
24 def dotenv(self, line):
25 args = parse_argstring(self.dotenv, line)
26 # Locate the .env file
27 dotenv_path = args.dotenv_path
28 try:
29 dotenv_path = find_dotenv(dotenv_path, True, True)
30 except IOError:
31 print("cannot find .env file")
32 return
33
34 # Load the .env file
35 load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
36
37
38 def load_ipython_extension(ipython):
39 """Register the %dotenv magic."""
40 ipython.register_magics(IPythonDotEnv)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pipenv/patched/dotenv/__init__.py b/pipenv/patched/dotenv/__init__.py
--- a/pipenv/patched/dotenv/__init__.py
+++ b/pipenv/patched/dotenv/__init__.py
@@ -1,8 +1,4 @@
from .cli import get_cli_string
from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv
-try:
- from .ipython import load_ipython_extension
-except ImportError:
- pass
-__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']
+__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv']
diff --git a/pipenv/patched/dotenv/ipython.py b/pipenv/patched/dotenv/ipython.py
deleted file mode 100644
--- a/pipenv/patched/dotenv/ipython.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import print_function
-from .main import load_dotenv, find_dotenv
-
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.core.magic_arguments import (argument, magic_arguments,
- parse_argstring)
-
-
-@magics_class
-class IPythonDotEnv(Magics):
-
- @magic_arguments()
- @argument(
- '-o', '--override', action='store_true',
- help="Indicate to override existing variables"
- )
- @argument(
- '-v', '--verbose', action='store_true',
- help="Indicate function calls to be verbose"
- )
- @argument('dotenv_path', nargs='?', type=str, default='.env',
- help='Search in increasingly higher folders for the `dotenv_path`')
- @line_magic
- def dotenv(self, line):
- args = parse_argstring(self.dotenv, line)
- # Locate the .env file
- dotenv_path = args.dotenv_path
- try:
- dotenv_path = find_dotenv(dotenv_path, True, True)
- except IOError:
- print("cannot find .env file")
- return
-
- # Load the .env file
- load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
-
-
-def load_ipython_extension(ipython):
- """Register the %dotenv magic."""
- ipython.register_magics(IPythonDotEnv)
|
{"golden_diff": "diff --git a/pipenv/patched/dotenv/__init__.py b/pipenv/patched/dotenv/__init__.py\n--- a/pipenv/patched/dotenv/__init__.py\n+++ b/pipenv/patched/dotenv/__init__.py\n@@ -1,8 +1,4 @@\n from .cli import get_cli_string\n from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv\n-try:\n- from .ipython import load_ipython_extension\n-except ImportError:\n- pass\n \n-__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']\n+__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv']\ndiff --git a/pipenv/patched/dotenv/ipython.py b/pipenv/patched/dotenv/ipython.py\ndeleted file mode 100644\n--- a/pipenv/patched/dotenv/ipython.py\n+++ /dev/null\n@@ -1,40 +0,0 @@\n-from __future__ import print_function\n-from .main import load_dotenv, find_dotenv\n-\n-from IPython.core.magic import Magics, magics_class, line_magic\n-from IPython.core.magic_arguments import (argument, magic_arguments,\n- parse_argstring)\n-\n-\n-@magics_class\n-class IPythonDotEnv(Magics):\n-\n- @magic_arguments()\n- @argument(\n- '-o', '--override', action='store_true',\n- help=\"Indicate to override existing variables\"\n- )\n- @argument(\n- '-v', '--verbose', action='store_true',\n- help=\"Indicate function calls to be verbose\"\n- )\n- @argument('dotenv_path', nargs='?', type=str, default='.env',\n- help='Search in increasingly higher folders for the `dotenv_path`')\n- @line_magic\n- def dotenv(self, line):\n- args = parse_argstring(self.dotenv, line)\n- # Locate the .env file\n- dotenv_path = args.dotenv_path\n- try:\n- dotenv_path = find_dotenv(dotenv_path, True, True)\n- except IOError:\n- print(\"cannot find .env file\")\n- return\n-\n- # Load the .env file\n- load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)\n-\n-\n-def load_ipython_extension(ipython):\n- \"\"\"Register the %dotenv magic.\"\"\"\n- ipython.register_magics(IPythonDotEnv)\n", "issue": "pipenv starts slow when IPython is installed.\nIPython is imported when importing dotenv. \r\n(ref: theskumar/python-dotenv#84 and [import profile](https://paste.ubuntu.com/26409167/))\r\n\r\nSince pipenv uses patched version of dotenv, pipenv should port upstream fix\r\nor patch `dotenv/__init__.py` to stop importing dotenv.ipython.\r\n\r\n##### Describe your environment\r\n\r\n1. Ubuntu 17.10\r\n1. Python version: 3.7.0a4\r\n1. 
Pipenv version: 9.0.3\r\n\r\n##### Steps to replicate\r\n\r\n* Install Python 3.7.0a4 or newer\r\n* ` PYTHONPROFILEIMPORTTIME=1 path/to/pipenv --version 2>pipenv-version`\n", "before_files": [{"content": "from .cli import get_cli_string\nfrom .main import load_dotenv, get_key, set_key, unset_key, find_dotenv\ntry:\n from .ipython import load_ipython_extension\nexcept ImportError:\n pass\n\n__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']\n", "path": "pipenv/patched/dotenv/__init__.py"}, {"content": "from __future__ import print_function\nfrom .main import load_dotenv, find_dotenv\n\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.core.magic_arguments import (argument, magic_arguments,\n parse_argstring)\n\n\n@magics_class\nclass IPythonDotEnv(Magics):\n\n @magic_arguments()\n @argument(\n '-o', '--override', action='store_true',\n help=\"Indicate to override existing variables\"\n )\n @argument(\n '-v', '--verbose', action='store_true',\n help=\"Indicate function calls to be verbose\"\n )\n @argument('dotenv_path', nargs='?', type=str, default='.env',\n help='Search in increasingly higher folders for the `dotenv_path`')\n @line_magic\n def dotenv(self, line):\n args = parse_argstring(self.dotenv, line)\n # Locate the .env file\n dotenv_path = args.dotenv_path\n try:\n dotenv_path = find_dotenv(dotenv_path, True, True)\n except IOError:\n print(\"cannot find .env file\")\n return\n\n # Load the .env file\n load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Register the %dotenv magic.\"\"\"\n ipython.register_magics(IPythonDotEnv)\n", "path": "pipenv/patched/dotenv/ipython.py"}], "after_files": [{"content": "from .cli import get_cli_string\nfrom .main import load_dotenv, get_key, set_key, unset_key, find_dotenv\n\n__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv']\n", "path": "pipenv/patched/dotenv/__init__.py"}, {"content": null, "path": "pipenv/patched/dotenv/ipython.py"}]}
| 921 | 594 |
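A note on the patch above: it drops the IPython integration entirely, which is the simplest fix for a vendored copy. An alternative, shown here only as a minimal sketch rather than the change applied in this row, is to defer the heavy import until IPython actually loads the extension, so that `import dotenv` stays cheap while the `%dotenv` magic keeps working:

```python
# Hypothetical lazy-loading variant of dotenv/__init__.py: the IPython
# machinery is imported only when IPython calls load_ipython_extension(),
# never at plain `import dotenv` time.
from .cli import get_cli_string
from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv


def load_ipython_extension(ipython):
    """Register the %dotenv magic, importing IPython support on demand."""
    from .ipython import load_ipython_extension as _load
    _load(ipython)


__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key',
           'unset_key', 'find_dotenv', 'load_ipython_extension']
```

Either way, the effect is easy to verify on Python 3.7+ with `python -X importtime -c "import dotenv"`, the command-line equivalent of the `PYTHONPROFILEIMPORTTIME` variable mentioned in the issue.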
gh_patches_debug_4224
|
rasdani/github-patches
|
git_diff
|
pypa__pip-5146
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Development version number triggers a false positive warning
* Pip version: 10.0.0b1
* Python version: 3.6.4
* Operating system: Linux
### Description:
Say a package `foo` depends on `bar>=1.0.0`. If the installed version of `bar` is a development version such as `1.0.1.dev42`, pip issues an incompatible version warning upon installation of `foo`. Pip shouldn't issue any warning since `1.0.1.dev42>=1.0.0`. The weird thing is that pip is satisfied with that version when scanning the dependencies of `foo`, but issues that warning anyway.
For that matter, the real-life scenario is installing a development library with a `setuptools_scm`-generated version number and then installing a library that depends on it.
### What I've run:
```
% tree
.
├── bar
│ └── setup.py
└── foo
└── setup.py
2 directories, 2 files
```
```
% cat bar/setup.py
from setuptools import setup
setup(
name='bar',
version='1.0.1.dev42')
```
```
% cat foo/setup.py
from setuptools import setup
setup(
name='foo',
install_requires=['bar>=1.0.0'],
version='3.14.15')
```
```
# setting up virtual environment
% python3 -m venv compat
% source compat/bin/activate
% pip install pip==10.0.0b1
```
```
% pip install ./bar
Processing ./bar
Installing collected packages: bar
Running setup.py install for bar ... done
Successfully installed bar-1.0.1.dev42
```
```
% pip install ./foo
Processing ./foo
Requirement already satisfied: bar>=1.0.0 in ./compat/lib/python3.6/site-packages (from foo==3.14.15) (1.0.1.dev42)
foo 3.14.15 has requirement bar>=1.0.0, but you'll have bar 1.0.1.dev42 which is incompatible.
Installing collected packages: foo
Running setup.py install for foo ... done
Successfully installed foo-3.14.15
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/operations/check.py`
Content:
```
1 """Validation of dependencies of packages
2 """
3
4 from collections import namedtuple
5
6 from pip._vendor.packaging.utils import canonicalize_name
7
8 from pip._internal.operations.prepare import make_abstract_dist
9
10 from pip._internal.utils.misc import get_installed_distributions
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from pip._internal.req.req_install import InstallRequirement
15 from typing import Any, Dict, Iterator, Set, Tuple, List
16
17 # Shorthands
18 PackageSet = Dict[str, 'PackageDetails']
19 Missing = Tuple[str, Any]
20 Conflicting = Tuple[str, str, Any]
21
22 MissingDict = Dict[str, List[Missing]]
23 ConflictingDict = Dict[str, List[Conflicting]]
24 CheckResult = Tuple[MissingDict, ConflictingDict]
25
26 PackageDetails = namedtuple('PackageDetails', ['version', 'requires'])
27
28
29 def create_package_set_from_installed(**kwargs):
30 # type: (**Any) -> PackageSet
31 """Converts a list of distributions into a PackageSet.
32 """
33 retval = {}
34 for dist in get_installed_distributions(**kwargs):
35 name = canonicalize_name(dist.project_name)
36 retval[name] = PackageDetails(dist.version, dist.requires())
37 return retval
38
39
40 def check_package_set(package_set):
41 # type: (PackageSet) -> CheckResult
42 """Check if a package set is consistent
43 """
44 missing = dict()
45 conflicting = dict()
46
47 for package_name in package_set:
48 # Info about dependencies of package_name
49 missing_deps = set() # type: Set[Missing]
50 conflicting_deps = set() # type: Set[Conflicting]
51
52 for req in package_set[package_name].requires:
53 name = canonicalize_name(req.project_name) # type: str
54
55 # Check if it's missing
56 if name not in package_set:
57 missed = True
58 if req.marker is not None:
59 missed = req.marker.evaluate()
60 if missed:
61 missing_deps.add((name, req))
62 continue
63
64 # Check if there's a conflict
65 version = package_set[name].version # type: str
66 if version not in req.specifier:
67 conflicting_deps.add((name, version, req))
68
69 def str_key(x):
70 return str(x)
71
72 if missing_deps:
73 missing[package_name] = sorted(missing_deps, key=str_key)
74 if conflicting_deps:
75 conflicting[package_name] = sorted(conflicting_deps, key=str_key)
76
77 return missing, conflicting
78
79
80 def check_install_conflicts(to_install):
81 # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]
82 """For checking if the dependency graph would be consistent after \
83 installing given requirements
84 """
85 # Start from the current state
86 state = create_package_set_from_installed()
87 _simulate_installation_of(to_install, state)
88 return state, check_package_set(state)
89
90
91 # NOTE from @pradyunsg
92 # This required a minor update in dependency link handling logic over at
93 # operations.prepare.IsSDist.dist() to get it working
94 def _simulate_installation_of(to_install, state):
95 # type: (List[InstallRequirement], PackageSet) -> None
96 """Computes the version of packages after installing to_install.
97 """
98
99 # Modify it as installing requirement_set would (assuming no errors)
100 for inst_req in to_install:
101 dist = make_abstract_dist(inst_req).dist(finder=None)
102 state[dist.key] = PackageDetails(dist.version, dist.requires())
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py
--- a/src/pip/_internal/operations/check.py
+++ b/src/pip/_internal/operations/check.py
@@ -63,7 +63,7 @@
# Check if there's a conflict
version = package_set[name].version # type: str
- if version not in req.specifier:
+ if not req.specifier.contains(version, prereleases=True):
conflicting_deps.add((name, version, req))
def str_key(x):
|
{"golden_diff": "diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py\n--- a/src/pip/_internal/operations/check.py\n+++ b/src/pip/_internal/operations/check.py\n@@ -63,7 +63,7 @@\n \n # Check if there's a conflict\n version = package_set[name].version # type: str\n- if version not in req.specifier:\n+ if not req.specifier.contains(version, prereleases=True):\n conflicting_deps.add((name, version, req))\n \n def str_key(x):\n", "issue": "Development version number triggers a false positive warning\n* Pip version: 10.0.0b1\r\n* Python version: 3.6.4\r\n* Operating system: Linux\r\n\r\n### Description:\r\n\r\nSay a package `foo` depends on `bar>=1.0.0`. If the installed version of `bar` is a development version such as `1.0.1.dev42`, pip issues an incompatible version warning upon installation of `foo`. Pip shouldn't issue any warning since `1.0.1.dev42>=1.0.0`. The weird thing is that pip is satisfied with that version when scanning the dependencies of `foo`, but issues that warning anyway.\r\n\r\nFor that matter, the real life scenario is installing a development library with a `setuptools_scm`-generated version number and then installing a library that depends on it.\r\n\r\n### What I've run:\r\n\r\n```\r\n% tree\r\n.\r\n\u251c\u2500\u2500 bar\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 setup.py\r\n\u2514\u2500\u2500 foo\r\n \u2514\u2500\u2500 setup.py\r\n\r\n2 directories, 2 files\r\n```\r\n\r\n```\r\n% cat bar/setup.py\r\nfrom setuptools import setup\r\n\r\nsetup(\r\n name='bar',\r\n version='1.0.1.dev42')\r\n```\r\n\r\n```\r\n% cat foo/setup.py\r\nfrom setuptools import setup\r\n\r\nsetup(\r\n name='foo',\r\n install_requires=['bar>=1.0.0'],\r\n version='3.14.15')\r\n```\r\n\r\n```\r\n# setting up virtual environment\r\n% python3 -m venv compat\r\n% source compat/bin/activate\r\n% pip install pip==10.0.0b1\r\n```\r\n\r\n```\r\n% pip install ./bar\r\nProcessing ./bar\r\nInstalling collected packages: bar\r\n Running setup.py install for bar ... done\r\nSuccessfully installed bar-1.0.1.dev42\r\n```\r\n\r\n```\r\n% pip install ./foo\r\nProcessing ./foo\r\nRequirement already satisfied: bar>=1.0.0 in ./compat/lib/python3.6/site-packages (from foo==3.14.15) (1.0.1.dev42)\r\nfoo 3.14.15 has requirement bar>=1.0.0, but you'll have bar 1.0.1.dev42 which is incompatible.\r\nInstalling collected packages: foo\r\n Running setup.py install for foo ... 
done\r\nSuccessfully installed foo-3.14.15\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Validation of dependencies of packages\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.operations.prepare import make_abstract_dist\n\nfrom pip._internal.utils.misc import get_installed_distributions\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from pip._internal.req.req_install import InstallRequirement\n from typing import Any, Dict, Iterator, Set, Tuple, List\n\n # Shorthands\n PackageSet = Dict[str, 'PackageDetails']\n Missing = Tuple[str, Any]\n Conflicting = Tuple[str, str, Any]\n\n MissingDict = Dict[str, List[Missing]]\n ConflictingDict = Dict[str, List[Conflicting]]\n CheckResult = Tuple[MissingDict, ConflictingDict]\n\nPackageDetails = namedtuple('PackageDetails', ['version', 'requires'])\n\n\ndef create_package_set_from_installed(**kwargs):\n # type: (**Any) -> PackageSet\n \"\"\"Converts a list of distributions into a PackageSet.\n \"\"\"\n retval = {}\n for dist in get_installed_distributions(**kwargs):\n name = canonicalize_name(dist.project_name)\n retval[name] = PackageDetails(dist.version, dist.requires())\n return retval\n\n\ndef check_package_set(package_set):\n # type: (PackageSet) -> CheckResult\n \"\"\"Check if a package set is consistent\n \"\"\"\n missing = dict()\n conflicting = dict()\n\n for package_name in package_set:\n # Info about dependencies of package_name\n missing_deps = set() # type: Set[Missing]\n conflicting_deps = set() # type: Set[Conflicting]\n\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name) # type: str\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version # type: str\n if version not in req.specifier:\n conflicting_deps.add((name, version, req))\n\n def str_key(x):\n return str(x)\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str_key)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str_key)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install):\n # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]\n \"\"\"For checking if the dependency graph would be consistent after \\\n installing given requirements\n \"\"\"\n # Start from the current state\n state = create_package_set_from_installed()\n _simulate_installation_of(to_install, state)\n return state, check_package_set(state)\n\n\n# NOTE from @pradyunsg\n# This required a minor update in dependency link handling logic over at\n# operations.prepare.IsSDist.dist() to get it working\ndef _simulate_installation_of(to_install, state):\n # type: (List[InstallRequirement], PackageSet) -> None\n \"\"\"Computes the version of packages after installing to_install.\n \"\"\"\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n state[dist.key] = PackageDetails(dist.version, dist.requires())\n", "path": "src/pip/_internal/operations/check.py"}], "after_files": [{"content": "\"\"\"Validation of dependencies of packages\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom 
pip._internal.operations.prepare import make_abstract_dist\n\nfrom pip._internal.utils.misc import get_installed_distributions\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from pip._internal.req.req_install import InstallRequirement\n from typing import Any, Dict, Iterator, Set, Tuple, List\n\n # Shorthands\n PackageSet = Dict[str, 'PackageDetails']\n Missing = Tuple[str, Any]\n Conflicting = Tuple[str, str, Any]\n\n MissingDict = Dict[str, List[Missing]]\n ConflictingDict = Dict[str, List[Conflicting]]\n CheckResult = Tuple[MissingDict, ConflictingDict]\n\nPackageDetails = namedtuple('PackageDetails', ['version', 'requires'])\n\n\ndef create_package_set_from_installed(**kwargs):\n # type: (**Any) -> PackageSet\n \"\"\"Converts a list of distributions into a PackageSet.\n \"\"\"\n retval = {}\n for dist in get_installed_distributions(**kwargs):\n name = canonicalize_name(dist.project_name)\n retval[name] = PackageDetails(dist.version, dist.requires())\n return retval\n\n\ndef check_package_set(package_set):\n # type: (PackageSet) -> CheckResult\n \"\"\"Check if a package set is consistent\n \"\"\"\n missing = dict()\n conflicting = dict()\n\n for package_name in package_set:\n # Info about dependencies of package_name\n missing_deps = set() # type: Set[Missing]\n conflicting_deps = set() # type: Set[Conflicting]\n\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name) # type: str\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version # type: str\n if not req.specifier.contains(version, prereleases=True):\n conflicting_deps.add((name, version, req))\n\n def str_key(x):\n return str(x)\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str_key)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str_key)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install):\n # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]\n \"\"\"For checking if the dependency graph would be consistent after \\\n installing given requirements\n \"\"\"\n # Start from the current state\n state = create_package_set_from_installed()\n _simulate_installation_of(to_install, state)\n return state, check_package_set(state)\n\n\n# NOTE from @pradyunsg\n# This required a minor update in dependency link handling logic over at\n# operations.prepare.IsSDist.dist() to get it working\ndef _simulate_installation_of(to_install, state):\n # type: (List[InstallRequirement], PackageSet) -> None\n \"\"\"Computes the version of packages after installing to_install.\n \"\"\"\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n state[dist.key] = PackageDetails(dist.version, dist.requires())\n", "path": "src/pip/_internal/operations/check.py"}]}
| 1,739 | 126 |
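The one-line fix in this row hinges on how PEP 440 specifiers treat pre- and dev-releases: by default a specifier set excludes them from containment checks, and `pkg_resources` requirement objects delegate to the same machinery. A standalone sketch with the `packaging` library shows the difference between the old and new checks:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.0.0")

# Plain containment follows PEP 440's default: dev/pre-releases are
# excluded, so this is False even though 1.0.1.dev42 >= 1.0.0 numerically.
print("1.0.1.dev42" in spec)                            # False

# Opting in to prereleases gives the answer the installer already acted on.
print(spec.contains("1.0.1.dev42", prereleases=True))   # True
```

That asymmetry is exactly why `pip install ./foo` was satisfied during resolution yet warned afterwards: the conflict checker used the strict default while the rest of pip had accepted the installed dev version.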
gh_patches_debug_63956
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-1780
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module installation fails due to missing dependency
https://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1
The deprecated distutils was replaced with the packaging module as part of release v4.0.0b1.
packaging is not a built-in Python module, but it was not added to setup.py as a dependency, which causes applications that require redis-py to fail if packaging isn't already installed on the machine.
The packaging module should be added as a dependency in setup.py to resolve this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import find_packages, setup
3
4 import redis
5
6 setup(
7 name="redis",
8 description="Python client for Redis database and key-value store",
9 long_description=open("README.md").read().strip(),
10 long_description_content_type="text/markdown",
11 keywords=["Redis", "key-value store", "database"],
12 license="MIT",
13 version=redis.__version__,
14 packages=find_packages(
15 include=[
16 "redis",
17 "redis.commands",
18 "redis.commands.bf",
19 "redis.commands.json",
20 "redis.commands.search",
21 "redis.commands.timeseries",
22 "redis.commands.graph",
23 ]
24 ),
25 url="https://github.com/redis/redis-py",
26 author="Redis Inc.",
27 author_email="[email protected]",
28 python_requires=">=3.6",
29 install_requires=[
30 "deprecated==1.2.3",
31 "packaging==21.3",
32 ],
33 classifiers=[
34 "Development Status :: 5 - Production/Stable",
35 "Environment :: Console",
36 "Intended Audience :: Developers",
37 "License :: OSI Approved :: MIT License",
38 "Operating System :: OS Independent",
39 "Programming Language :: Python",
40 "Programming Language :: Python :: 3",
41 "Programming Language :: Python :: 3 :: Only",
42 "Programming Language :: Python :: 3.6",
43 "Programming Language :: Python :: 3.7",
44 "Programming Language :: Python :: 3.8",
45 "Programming Language :: Python :: 3.9",
46 "Programming Language :: Python :: 3.10",
47 "Programming Language :: Python :: Implementation :: CPython",
48 "Programming Language :: Python :: Implementation :: PyPy",
49 ],
50 extras_require={
51 "hiredis": ["hiredis>=1.0.0"],
52 },
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,9 +26,12 @@
author="Redis Inc.",
author_email="[email protected]",
python_requires=">=3.6",
+ setup_requires=[
+ "packaging>=21.3",
+ ],
install_requires=[
- "deprecated==1.2.3",
- "packaging==21.3",
+ "deprecated>=1.2.3",
+ "packaging>=21.3",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,9 +26,12 @@\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n+ setup_requires=[\n+ \"packaging>=21.3\",\n+ ],\n install_requires=[\n- \"deprecated==1.2.3\",\n- \"packaging==21.3\",\n+ \"deprecated>=1.2.3\",\n+ \"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "Module installation fails due to missing dependency\nhttps://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1\r\nthe deprecated distutils was replaced with the packaging module as part of release v4.0.0b1\r\npackaging is not a builtin python module but was not added to setup.py as a dependency which causes applications that require redis-py to fail if packaging isn't already installed on the machine.\r\nthe packaging module should probably be added as a dependency in setup.py to resolve this\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated==1.2.3\",\n \"packaging==21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n setup_requires=[\n \"packaging>=21.3\",\n ],\n install_requires=[\n \"deprecated>=1.2.3\",\n 
\"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}]}
| 889 | 141 |
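Context for the row above: the missing dependency traces back to swapping the deprecated standard-library `distutils.version` for `packaging.version`, which is third-party and therefore must be declared in `setup.py`. A side-by-side sketch of the two APIs — note that `distutils` is deprecated since Python 3.10 and removed in 3.12:

```python
# Old approach: standard library, now deprecated/removed.
from distutils.version import StrictVersion
assert StrictVersion("4.1.0") > StrictVersion("4.0.0")

# New approach: the third-party `packaging` module this row's patch adds to
# install_requires. It is PEP 440 aware, so dev/rc versions compare sanely.
from packaging.version import Version
assert Version("4.1.0") > Version("4.0.0")
assert Version("4.1.0.dev1") < Version("4.1.0")
```

The diff also relaxes the `==` pins to `>=` minimums, which matters for a library: exact pins in `install_requires` force an install failure or conflict whenever another package in the environment needs a different patch release of `deprecated` or `packaging`.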
gh_patches_debug_20147
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After creating a new organization it should appear in the pending approval menu
Please make sure that when a user adds an organization, the Pending Approval menu is updated.
http://staging.changelog.qgis.org/en/qgis/pending-certifyingorganisation/list/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/core/custom_middleware.py`
Content:
```
1 # coding=utf-8
2 # flake8: noqa
3 """
4 core.custom_middleware
5 """
6 from base.models import Project, Version
7 from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry
8
9
10 class NavContextMiddleware(object):
11 """
12 Adds the required navigation variables to each response
13 """
14
15 def __init__(self):
16 pass
17
18 @staticmethod
19 def process_template_response(request, response):
20 """
21 Add 'the_project', 'the_entry', 'the_version' to context for the
22 navigation.
23
24 Justification: To make the navigation functional, we need to know
25 which Project (or Version, Committee etc) the current context
26 relates to. This is required for URLs. Rather than include lots of
27 if/else in the navigation template, it seems cleaner to add the
28 above variables to the context here.
29
30 :param request: Http Request obj
31 :param response: Http Response obj
32 :return: context :rtype: dict
33 """
34 context = response.context_data
35
36 if context.get('project', None):
37 context['the_project'] = context.get('project')
38 versions = Version.objects.filter(project=context.get('project'))
39 context['has_pending_versions'] = (
40 Version.unapproved_objects.filter(
41 project=context.get('project')).exists())
42 context['has_pending_categories'] = (
43 Category.unapproved_objects.filter(
44 project=context.get('project')).exists())
45 context['has_pending_sponsor_lvl'] = (
46 SponsorshipLevel.unapproved_objects.filter(
47 project=context.get('project')).exists())
48 context['has_pending_sponsor_period'] = (
49 SponsorshipPeriod.unapproved_objects.filter(
50 project=context.get('project')).exists())
51 if versions:
52 context['has_pending_entries'] = (
53 Entry.unapproved_objects.filter(
54 version__in=versions).exists())
55
56 else:
57 if request.user.is_staff:
58 context['the_projects'] = Project.objects.all()
59 else:
60 context['the_projects'] = Project.approved_objects.filter(
61 private=False
62 )
63
64 if context.get('version', None):
65 context['the_version'] = context.get('version')
66 context['the_project'] = context.get('version').project
67
68 if context.get('committee', None):
69 context['the_committee'] = context.get('committee')
70 context['the_project'] = context.get('committee').project
71
72 if context.get('ballot', None):
73 context['the_committee'] = context.get('ballot').committee
74 context['the_project'] = context.get('ballot').committee.project
75
76 if context.get('category', None):
77 context['the_project'] = context.get('category').project
78
79 if context.get('ballots', None):
80 try:
81 context['the_project'] = \
82 context.get('ballots')[0].committee.project
83 except (KeyError, IndexError):
84 pass
85
86 if context.get('entry', None):
87 context['the_entry'] = context.get('entry')
88 context['the_version'] = context.get('entry').version
89 context['the_project'] = context.get('entry').version.project
90
91 if context.get('committees', None):
92 try:
93 context['the_project'] = context.get('committees')[0].project
94 except (KeyError, IndexError):
95 pass
96
97 if context.get('versions', None):
98 try:
99 context['the_project'] = context.get('versions')[0].project
100 except (KeyError, IndexError):
101 pass
102
103 if context.get('entries', None):
104 try:
105 context['the_version'] = context.get('entries')[0].version
106 context['the_project'] = \
107 context.get('entries')[0].version.project
108 except (KeyError, IndexError):
109 pass
110
111 if context.get('categories', None):
112 try:
113 context['the_project'] = \
114 context.get('categories')[0].project
115 except (KeyError, IndexError):
116 pass
117
118 return response
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py
--- a/django_project/core/custom_middleware.py
+++ b/django_project/core/custom_middleware.py
@@ -5,6 +5,7 @@
"""
from base.models import Project, Version
from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry
+from certification.models import CertifyingOrganisation
class NavContextMiddleware(object):
@@ -48,6 +49,9 @@
context['has_pending_sponsor_period'] = (
SponsorshipPeriod.unapproved_objects.filter(
project=context.get('project')).exists())
+ context['has_pending_organisations'] = (
+ CertifyingOrganisation.unapproved_objects.filter(
+ project=context.get('project')).exists())
if versions:
context['has_pending_entries'] = (
Entry.unapproved_objects.filter(
|
{"golden_diff": "diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py\n--- a/django_project/core/custom_middleware.py\n+++ b/django_project/core/custom_middleware.py\n@@ -5,6 +5,7 @@\n \"\"\"\n from base.models import Project, Version\n from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry\n+from certification.models import CertifyingOrganisation\n \n \n class NavContextMiddleware(object):\n@@ -48,6 +49,9 @@\n context['has_pending_sponsor_period'] = (\n SponsorshipPeriod.unapproved_objects.filter(\n project=context.get('project')).exists())\n+ context['has_pending_organisations'] = (\n+ CertifyingOrganisation.unapproved_objects.filter(\n+ project=context.get('project')).exists())\n if versions:\n context['has_pending_entries'] = (\n Entry.unapproved_objects.filter(\n", "issue": "After creating a new organization it should appear in the pending approval menu\nPlease make sure if a user adds an organization the Pending Approval menu is updated\r\n\r\nhttp://staging.changelog.qgis.org/en/qgis/pending-certifyingorganisation/list/\n", "before_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"\ncore.custom_middleware\n\"\"\"\nfrom base.models import Project, Version\nfrom changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry\n\n\nclass NavContextMiddleware(object):\n \"\"\"\n Adds the required navigation variables to each response\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def process_template_response(request, response):\n \"\"\"\n Add 'the_project', 'the_entry', 'the_version' to context for the\n navigation.\n\n Justification: To make the navigation functional, we need to know\n which Project (or Version, Committee etc) the current context\n relates to. This is required for URLs. 
Rather than include lots of\n if/else in the navigation template, it seems cleaner to add the\n above variables to the context here.\n\n :param request: Http Request obj\n :param response: Http Response obj\n :return: context :rtype: dict\n \"\"\"\n context = response.context_data\n\n if context.get('project', None):\n context['the_project'] = context.get('project')\n versions = Version.objects.filter(project=context.get('project'))\n context['has_pending_versions'] = (\n Version.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_categories'] = (\n Category.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_lvl'] = (\n SponsorshipLevel.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_period'] = (\n SponsorshipPeriod.unapproved_objects.filter(\n project=context.get('project')).exists())\n if versions:\n context['has_pending_entries'] = (\n Entry.unapproved_objects.filter(\n version__in=versions).exists())\n\n else:\n if request.user.is_staff:\n context['the_projects'] = Project.objects.all()\n else:\n context['the_projects'] = Project.approved_objects.filter(\n private=False\n )\n\n if context.get('version', None):\n context['the_version'] = context.get('version')\n context['the_project'] = context.get('version').project\n\n if context.get('committee', None):\n context['the_committee'] = context.get('committee')\n context['the_project'] = context.get('committee').project\n\n if context.get('ballot', None):\n context['the_committee'] = context.get('ballot').committee\n context['the_project'] = context.get('ballot').committee.project\n\n if context.get('category', None):\n context['the_project'] = context.get('category').project\n\n if context.get('ballots', None):\n try:\n context['the_project'] = \\\n context.get('ballots')[0].committee.project\n except (KeyError, IndexError):\n pass\n\n if context.get('entry', None):\n context['the_entry'] = context.get('entry')\n context['the_version'] = context.get('entry').version\n context['the_project'] = context.get('entry').version.project\n\n if context.get('committees', None):\n try:\n context['the_project'] = context.get('committees')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('versions', None):\n try:\n context['the_project'] = context.get('versions')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('entries', None):\n try:\n context['the_version'] = context.get('entries')[0].version\n context['the_project'] = \\\n context.get('entries')[0].version.project\n except (KeyError, IndexError):\n pass\n\n if context.get('categories', None):\n try:\n context['the_project'] = \\\n context.get('categories')[0].project\n except (KeyError, IndexError):\n pass\n\n return response\n", "path": "django_project/core/custom_middleware.py"}], "after_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"\ncore.custom_middleware\n\"\"\"\nfrom base.models import Project, Version\nfrom changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry\nfrom certification.models import CertifyingOrganisation\n\n\nclass NavContextMiddleware(object):\n \"\"\"\n Adds the required navigation variables to each response\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def process_template_response(request, response):\n \"\"\"\n Add 'the_project', 'the_entry', 'the_version' to context for the\n navigation.\n\n Justification: To make the navigation functional, 
we need to know\n which Project (or Version, Committee etc) the current context\n relates to. This is required for URLs. Rather than include lots of\n if/else in the navigation template, it seems cleaner to add the\n above variables to the context here.\n\n :param request: Http Request obj\n :param response: Http Response obj\n :return: context :rtype: dict\n \"\"\"\n context = response.context_data\n\n if context.get('project', None):\n context['the_project'] = context.get('project')\n versions = Version.objects.filter(project=context.get('project'))\n context['has_pending_versions'] = (\n Version.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_categories'] = (\n Category.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_lvl'] = (\n SponsorshipLevel.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_period'] = (\n SponsorshipPeriod.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_organisations'] = (\n CertifyingOrganisation.unapproved_objects.filter(\n project=context.get('project')).exists())\n if versions:\n context['has_pending_entries'] = (\n Entry.unapproved_objects.filter(\n version__in=versions).exists())\n\n else:\n if request.user.is_staff:\n context['the_projects'] = Project.objects.all()\n else:\n context['the_projects'] = Project.approved_objects.filter(\n private=False\n )\n\n if context.get('version', None):\n context['the_version'] = context.get('version')\n context['the_project'] = context.get('version').project\n\n if context.get('committee', None):\n context['the_committee'] = context.get('committee')\n context['the_project'] = context.get('committee').project\n\n if context.get('ballot', None):\n context['the_committee'] = context.get('ballot').committee\n context['the_project'] = context.get('ballot').committee.project\n\n if context.get('category', None):\n context['the_project'] = context.get('category').project\n\n if context.get('ballots', None):\n try:\n context['the_project'] = \\\n context.get('ballots')[0].committee.project\n except (KeyError, IndexError):\n pass\n\n if context.get('entry', None):\n context['the_entry'] = context.get('entry')\n context['the_version'] = context.get('entry').version\n context['the_project'] = context.get('entry').version.project\n\n if context.get('committees', None):\n try:\n context['the_project'] = context.get('committees')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('versions', None):\n try:\n context['the_project'] = context.get('versions')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('entries', None):\n try:\n context['the_version'] = context.get('entries')[0].version\n context['the_project'] = \\\n context.get('entries')[0].version.project\n except (KeyError, IndexError):\n pass\n\n if context.get('categories', None):\n try:\n context['the_project'] = \\\n context.get('categories')[0].project\n except (KeyError, IndexError):\n pass\n\n return response\n", "path": "django_project/core/custom_middleware.py"}]}
| 1,422 | 190 |
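The middleware change above assumes `CertifyingOrganisation` exposes an `unapproved_objects` manager, matching the convention of the other changelog models it sits beside. For readers unfamiliar with that convention, here is a minimal sketch of such a manager — field names are illustrative assumptions, not the project's actual schema:

```python
from django.db import models


class UnapprovedManager(models.Manager):
    """Yield only records still awaiting moderator approval."""

    def get_queryset(self):
        return super().get_queryset().filter(approved=False)


class CertifyingOrganisation(models.Model):
    name = models.CharField(max_length=200)
    approved = models.BooleanField(default=False)
    project = models.ForeignKey('base.Project', on_delete=models.CASCADE)

    objects = models.Manager()                # default manager stays first
    unapproved_objects = UnapprovedManager()
```

With that in place, the middleware's `CertifyingOrganisation.unapproved_objects.filter(project=...).exists()` is a single cheap EXISTS query per request, and the Pending Approval menu entry lights up as soon as an organisation is created.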
gh_patches_debug_35988
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-2175
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatibility between `@angular_freq_to_hz` and var-keyword arguments
### Bug description
While trying to decorate `gyrofrequency` with `@particle_input` in #2026, I found an issue with `@angular_freq_to_hz`. It appears that `@angular_freq_to_hz` cannot decorate functions that accept var-keyword arguments.
### Expected outcome
We should be able to use `@angular_freq_to_hz` to decorate functions with var-keyword parameters.
### Minimal complete verifiable example
When declaring this function:
```Python
from plasmapy.utils.decorators import angular_freq_to_hz
@angular_freq_to_hz
def f(**kwargs):
return kwargs
```
I get:
```python
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[41], line 1
----> 1 @angular_freq_to_hz
2 def f(**kwargs):
3 return kwargs
File ~/Projects/PlasmaPy/plasmapy/utils/decorators/converter.py:101, in angular_freq_to_hz(fn)
97 new_params = sig.parameters.copy()
98 new_params["to_hz"] = inspect.Parameter(
99 "to_hz", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False
100 )
--> 101 new_sig = inspect.Signature(
102 parameters=new_params.values(), return_annotation=sig.return_annotation
103 )
104 fn.__signature__ = new_sig
106 @preserve_signature
107 @functools.wraps(fn)
108 def wrapper(*args, to_hz=False, **kwargs):
File ~/miniconda3/envs/pldev/lib/python3.11/inspect.py:2994, in Signature.__init__(self, parameters, return_annotation, __validate_parameters__)
2988 msg = (
2989 'wrong parameter order: {} parameter before {} '
2990 'parameter'
2991 )
2992 msg = msg.format(top_kind.description,
2993 kind.description)
-> 2994 raise ValueError(msg)
2995 elif kind > top_kind:
2996 kind_defaults = False
ValueError: wrong parameter order: variadic keyword parameter before positional or keyword parameter
```
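The failure comes from `inspect.Signature` itself: it validates parameter order, and a `VAR_KEYWORD` parameter must always come last, so appending a positional-or-keyword `to_hz` after `**kwargs` is rejected. Below is a minimal, self-contained sketch of the reordering idea (the helper name `signature_with_to_hz` is made up for illustration; this is not claimed to be PlasmaPy's implementation):

```python
import inspect

def signature_with_to_hz(fn):
    # Rebuild fn's signature with a keyword-only ``to_hz`` inserted
    # *before* any **kwargs parameter, which must stay last.
    sig = inspect.signature(fn)
    params, var_kw = [], None
    for p in sig.parameters.values():
        if p.kind is inspect.Parameter.VAR_KEYWORD:
            var_kw = p
        else:
            params.append(p)
    params.append(
        inspect.Parameter("to_hz", inspect.Parameter.KEYWORD_ONLY, default=False)
    )
    if var_kw is not None:
        params.append(var_kw)  # VAR_KEYWORD back in last position
    return sig.replace(parameters=params)

def f(**kwargs):
    return kwargs

print(signature_with_to_hz(f))  # (*, to_hz=False, **kwargs)
```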
### Package versions
Development branch
### Additional context
This is medium priority to address since it's blocking #2026 and possibly also #2022.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/utils/decorators/converter.py`
Content:
```
1 """Decorators to convert units."""
2
3 __all__ = ["angular_freq_to_hz"]
4
5 import astropy.units as u
6 import functools
7 import inspect
8
9 from plasmapy.utils.decorators.helpers import preserve_signature
10
11
12 def angular_freq_to_hz(fn):
13 """
14 A decorator that enables a function to convert its return
15 value from angular frequency (rad/s) to frequency (Hz).
16
17 A kwarg ``to_hz`` is added to the function's signature, with a
18 default value of `False`. The keyword is also added to the
19 function's docstring under the **"Other Parameters"** heading.
20
21 Parameters
22 ----------
23 fn : function
24 The function to be decorated.
25
26 Raises
27 ------
28 ValueError
29 If ``fn`` has already defined a kwarg ``to_hz``.
30
31 Returns
32 -------
33 callable
34 The decorated function.
35
36 Notes
37 -----
38 * If `~plasmapy.utils.decorators.converter.angular_freq_to_hz` is
39 used with decorator
40 :func:`~plasmapy.utils.decorators.validators.validate_quantities`,
41 then `angular_freq_to_hz` should be used inside
42 :func:`~plasmapy.utils.decorators.validators.validate_quantities`
43 but special consideration is needed for setup. The following is
44 an example of an appropriate setup::
45
46 import astropy.units as u
47 from plasmapy.utils.decorators.converter import angular_freq_to_hz
48 from plasmapy.utils.decorators.validators import validate_quantities
49
50 @validate_quantities(validations_on_return={'units': [u.rad / u.s, u.Hz]})
51 @angular_freq_to_hz
52 def foo(x: u.rad / u.s) -> u.rad / u.s
53 return x
54
55 Adding ``u.Hz`` to the allowed units allows the converted
56 quantity to pass the validations.
57
58 Examples
59 --------
60 >>> import astropy.units as u
61 >>> from plasmapy.utils.decorators.converter import angular_freq_to_hz
62 >>>
63 >>> @angular_freq_to_hz
64 ... def foo(x):
65 ... return x
66 >>>
67 >>> foo(5 * u.rad / u.s, to_hz=True)
68 <Quantity 0.79577472 Hz>
69 >>>
70 >>> foo(-1 * u.rad / u.s, to_hz=True)
71 <Quantity -0.15915494 Hz>
72
73 Decoration also works with methods
74
75 >>> class Foo:
76 ... def __init__(self, x):
77 ... self.x = x
78 ...
79 ... @angular_freq_to_hz
80 ... def bar(self):
81 ... return self.x
82 >>>
83 >>> foo = Foo(0.5 * u.rad / u.s)
84 >>> foo.bar(to_hz=True)
85 <Quantity 0.07957747 Hz>
86
87 """
88 # raise exception if fn uses the 'to_hz' kwarg
89 sig = inspect.signature(fn)
90 if "to_hz" in sig.parameters:
91 raise ValueError(
92 f"Wrapped function '{fn.__name__}' can not use keyword 'to_hz'."
93 f" Keyword reserved for decorator functionality."
94 )
95
96 # make new signature for fn
97 new_params = sig.parameters.copy()
98 new_params["to_hz"] = inspect.Parameter(
99 "to_hz", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False
100 )
101 new_sig = inspect.Signature(
102 parameters=new_params.values(), return_annotation=sig.return_annotation
103 )
104 fn.__signature__ = new_sig
105
106 @preserve_signature
107 @functools.wraps(fn)
108 def wrapper(*args, to_hz=False, **kwargs):
109 _result = fn(*args, **kwargs)
110 if to_hz:
111 return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
112 return _result
113
114 added_doc_bit = """
115 Other Parameters
116 ----------------
117 to_hz: bool
118 Set `True` to to convert function output from angular frequency to Hz
119 """
120 if wrapper.__doc__ is not None:
121 wrapper.__doc__ += added_doc_bit
122 else:
123 wrapper.__doc__ = added_doc_bit
124
125 return wrapper
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/utils/decorators/converter.py b/plasmapy/utils/decorators/converter.py
--- a/plasmapy/utils/decorators/converter.py
+++ b/plasmapy/utils/decorators/converter.py
@@ -3,10 +3,8 @@
__all__ = ["angular_freq_to_hz"]
import astropy.units as u
-import functools
import inspect
-
-from plasmapy.utils.decorators.helpers import preserve_signature
+import wrapt
def angular_freq_to_hz(fn):
@@ -85,7 +83,6 @@
<Quantity 0.07957747 Hz>
"""
- # raise exception if fn uses the 'to_hz' kwarg
sig = inspect.signature(fn)
if "to_hz" in sig.parameters:
raise ValueError(
@@ -94,32 +91,45 @@
)
# make new signature for fn
- new_params = sig.parameters.copy()
- new_params["to_hz"] = inspect.Parameter(
- "to_hz", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False
+ new_params = []
+ var_keyword_param = None
+ for param in sig.parameters.values():
+ if param.kind == param.VAR_KEYWORD:
+ var_keyword_param = param
+ else:
+ new_params.append(param)
+
+ new_params.append(
+ inspect.Parameter("to_hz", inspect.Parameter.KEYWORD_ONLY, default=False)
)
+
+ if var_keyword_param:
+ new_params.append(var_keyword_param)
+
new_sig = inspect.Signature(
- parameters=new_params.values(), return_annotation=sig.return_annotation
+ parameters=new_params, return_annotation=sig.return_annotation
)
fn.__signature__ = new_sig
- @preserve_signature
- @functools.wraps(fn)
- def wrapper(*args, to_hz=False, **kwargs):
+ @wrapt.decorator
+ def wrapper(fn, instance, args, kwargs): # noqa: ARG001
+ to_hz = kwargs.pop("to_hz", False)
_result = fn(*args, **kwargs)
if to_hz:
return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
return _result
+ fn = wrapper(fn)
+
added_doc_bit = """
Other Parameters
----------------
to_hz: bool
- Set `True` to to convert function output from angular frequency to Hz
+ Set `True` to convert function output from angular frequency to Hz
"""
- if wrapper.__doc__ is not None:
- wrapper.__doc__ += added_doc_bit
+ if fn.__doc__ is not None:
+ fn.__doc__ += added_doc_bit
else:
- wrapper.__doc__ = added_doc_bit
+ fn.__doc__ = added_doc_bit
- return wrapper
+ return fn
|
{"golden_diff": "diff --git a/plasmapy/utils/decorators/converter.py b/plasmapy/utils/decorators/converter.py\n--- a/plasmapy/utils/decorators/converter.py\n+++ b/plasmapy/utils/decorators/converter.py\n@@ -3,10 +3,8 @@\n __all__ = [\"angular_freq_to_hz\"]\n \n import astropy.units as u\n-import functools\n import inspect\n-\n-from plasmapy.utils.decorators.helpers import preserve_signature\n+import wrapt\n \n \n def angular_freq_to_hz(fn):\n@@ -85,7 +83,6 @@\n <Quantity 0.07957747 Hz>\n \n \"\"\"\n- # raise exception if fn uses the 'to_hz' kwarg\n sig = inspect.signature(fn)\n if \"to_hz\" in sig.parameters:\n raise ValueError(\n@@ -94,32 +91,45 @@\n )\n \n # make new signature for fn\n- new_params = sig.parameters.copy()\n- new_params[\"to_hz\"] = inspect.Parameter(\n- \"to_hz\", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False\n+ new_params = []\n+ var_keyword_param = None\n+ for param in sig.parameters.values():\n+ if param.kind == param.VAR_KEYWORD:\n+ var_keyword_param = param\n+ else:\n+ new_params.append(param)\n+\n+ new_params.append(\n+ inspect.Parameter(\"to_hz\", inspect.Parameter.KEYWORD_ONLY, default=False)\n )\n+\n+ if var_keyword_param:\n+ new_params.append(var_keyword_param)\n+\n new_sig = inspect.Signature(\n- parameters=new_params.values(), return_annotation=sig.return_annotation\n+ parameters=new_params, return_annotation=sig.return_annotation\n )\n fn.__signature__ = new_sig\n \n- @preserve_signature\n- @functools.wraps(fn)\n- def wrapper(*args, to_hz=False, **kwargs):\n+ @wrapt.decorator\n+ def wrapper(fn, instance, args, kwargs): # noqa: ARG001\n+ to_hz = kwargs.pop(\"to_hz\", False)\n _result = fn(*args, **kwargs)\n if to_hz:\n return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])\n return _result\n \n+ fn = wrapper(fn)\n+\n added_doc_bit = \"\"\"\n Other Parameters\n ----------------\n to_hz: bool\n- Set `True` to to convert function output from angular frequency to Hz\n+ Set `True` to convert function output from angular frequency to Hz\n \"\"\"\n- if wrapper.__doc__ is not None:\n- wrapper.__doc__ += added_doc_bit\n+ if fn.__doc__ is not None:\n+ fn.__doc__ += added_doc_bit\n else:\n- wrapper.__doc__ = added_doc_bit\n+ fn.__doc__ = added_doc_bit\n \n- return wrapper\n+ return fn\n", "issue": "Incompatibility between `@angular_freq_to_hz` and var-keyword arguments\n### Bug description\r\n\r\nWhile trying to decorate `gyrofrequency` with `@particle_input` in #2026, I found an issue with `@angular_freq_to_hz`. 
It appears that `@angular_freq_to_hz` cannot decorate functions that accept var-keyword arguments.\r\n\r\n### Expected outcome\r\n\r\nWe should be able to use `@angular_freq_to_hz` to decorate functions with var-keyword parameters.\r\n\r\n### Minimal complete verifiable example\r\n\r\nWhen declaring this function:\r\n\r\n```Python\r\nfrom plasmapy.utils.decorators import angular_freq_to_hz\r\n@angular_freq_to_hz\r\ndef f(**kwargs):\r\n return kwargs\r\n```\r\nI get:\r\n```python\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\nCell In[41], line 1\r\n----> 1 @angular_freq_to_hz\r\n 2 def f(**kwargs):\r\n 3 return kwargs\r\n\r\nFile ~/Projects/PlasmaPy/plasmapy/utils/decorators/converter.py:101, in angular_freq_to_hz(fn)\r\n 97 new_params = sig.parameters.copy()\r\n 98 new_params[\"to_hz\"] = inspect.Parameter(\r\n 99 \"to_hz\", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False\r\n 100 )\r\n--> 101 new_sig = inspect.Signature(\r\n 102 parameters=new_params.values(), return_annotation=sig.return_annotation\r\n 103 )\r\n 104 fn.__signature__ = new_sig\r\n 106 @preserve_signature\r\n 107 @functools.wraps(fn)\r\n 108 def wrapper(*args, to_hz=False, **kwargs):\r\n\r\nFile ~/miniconda3/envs/pldev/lib/python3.11/inspect.py:2994, in Signature.__init__(self, parameters, return_annotation, __validate_parameters__)\r\n 2988 msg = (\r\n 2989 'wrong parameter order: {} parameter before {} '\r\n 2990 'parameter'\r\n 2991 )\r\n 2992 msg = msg.format(top_kind.description,\r\n 2993 kind.description)\r\n-> 2994 raise ValueError(msg)\r\n 2995 elif kind > top_kind:\r\n 2996 kind_defaults = False\r\n\r\nValueError: wrong parameter order: variadic keyword parameter before positional or keyword parameter\r\n```\r\n\r\n\r\n### Package versions\r\n\r\nDevelopment branch \r\n\r\n### Additional context\r\n\r\nThis is medium priority to address since it's blocking #2026 and possibly also #2022.\n", "before_files": [{"content": "\"\"\"Decorators to convert units.\"\"\"\n\n__all__ = [\"angular_freq_to_hz\"]\n\nimport astropy.units as u\nimport functools\nimport inspect\n\nfrom plasmapy.utils.decorators.helpers import preserve_signature\n\n\ndef angular_freq_to_hz(fn):\n \"\"\"\n A decorator that enables a function to convert its return\n value from angular frequency (rad/s) to frequency (Hz).\n\n A kwarg ``to_hz`` is added to the function's signature, with a\n default value of `False`. The keyword is also added to the\n function's docstring under the **\"Other Parameters\"** heading.\n\n Parameters\n ----------\n fn : function\n The function to be decorated.\n\n Raises\n ------\n ValueError\n If ``fn`` has already defined a kwarg ``to_hz``.\n\n Returns\n -------\n callable\n The decorated function.\n\n Notes\n -----\n * If `~plasmapy.utils.decorators.converter.angular_freq_to_hz` is\n used with decorator\n :func:`~plasmapy.utils.decorators.validators.validate_quantities`,\n then `angular_freq_to_hz` should be used inside\n :func:`~plasmapy.utils.decorators.validators.validate_quantities`\n but special consideration is needed for setup. 
The following is\n an example of an appropriate setup::\n\n import astropy.units as u\n from plasmapy.utils.decorators.converter import angular_freq_to_hz\n from plasmapy.utils.decorators.validators import validate_quantities\n\n @validate_quantities(validations_on_return={'units': [u.rad / u.s, u.Hz]})\n @angular_freq_to_hz\n def foo(x: u.rad / u.s) -> u.rad / u.s\n return x\n\n Adding ``u.Hz`` to the allowed units allows the converted\n quantity to pass the validations.\n\n Examples\n --------\n >>> import astropy.units as u\n >>> from plasmapy.utils.decorators.converter import angular_freq_to_hz\n >>>\n >>> @angular_freq_to_hz\n ... def foo(x):\n ... return x\n >>>\n >>> foo(5 * u.rad / u.s, to_hz=True)\n <Quantity 0.79577472 Hz>\n >>>\n >>> foo(-1 * u.rad / u.s, to_hz=True)\n <Quantity -0.15915494 Hz>\n\n Decoration also works with methods\n\n >>> class Foo:\n ... def __init__(self, x):\n ... self.x = x\n ...\n ... @angular_freq_to_hz\n ... def bar(self):\n ... return self.x\n >>>\n >>> foo = Foo(0.5 * u.rad / u.s)\n >>> foo.bar(to_hz=True)\n <Quantity 0.07957747 Hz>\n\n \"\"\"\n # raise exception if fn uses the 'to_hz' kwarg\n sig = inspect.signature(fn)\n if \"to_hz\" in sig.parameters:\n raise ValueError(\n f\"Wrapped function '{fn.__name__}' can not use keyword 'to_hz'.\"\n f\" Keyword reserved for decorator functionality.\"\n )\n\n # make new signature for fn\n new_params = sig.parameters.copy()\n new_params[\"to_hz\"] = inspect.Parameter(\n \"to_hz\", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False\n )\n new_sig = inspect.Signature(\n parameters=new_params.values(), return_annotation=sig.return_annotation\n )\n fn.__signature__ = new_sig\n\n @preserve_signature\n @functools.wraps(fn)\n def wrapper(*args, to_hz=False, **kwargs):\n _result = fn(*args, **kwargs)\n if to_hz:\n return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])\n return _result\n\n added_doc_bit = \"\"\"\n Other Parameters\n ----------------\n to_hz: bool\n Set `True` to to convert function output from angular frequency to Hz\n \"\"\"\n if wrapper.__doc__ is not None:\n wrapper.__doc__ += added_doc_bit\n else:\n wrapper.__doc__ = added_doc_bit\n\n return wrapper\n", "path": "plasmapy/utils/decorators/converter.py"}], "after_files": [{"content": "\"\"\"Decorators to convert units.\"\"\"\n\n__all__ = [\"angular_freq_to_hz\"]\n\nimport astropy.units as u\nimport inspect\nimport wrapt\n\n\ndef angular_freq_to_hz(fn):\n \"\"\"\n A decorator that enables a function to convert its return\n value from angular frequency (rad/s) to frequency (Hz).\n\n A kwarg ``to_hz`` is added to the function's signature, with a\n default value of `False`. The keyword is also added to the\n function's docstring under the **\"Other Parameters\"** heading.\n\n Parameters\n ----------\n fn : function\n The function to be decorated.\n\n Raises\n ------\n ValueError\n If ``fn`` has already defined a kwarg ``to_hz``.\n\n Returns\n -------\n callable\n The decorated function.\n\n Notes\n -----\n * If `~plasmapy.utils.decorators.converter.angular_freq_to_hz` is\n used with decorator\n :func:`~plasmapy.utils.decorators.validators.validate_quantities`,\n then `angular_freq_to_hz` should be used inside\n :func:`~plasmapy.utils.decorators.validators.validate_quantities`\n but special consideration is needed for setup. 
The following is\n an example of an appropriate setup::\n\n import astropy.units as u\n from plasmapy.utils.decorators.converter import angular_freq_to_hz\n from plasmapy.utils.decorators.validators import validate_quantities\n\n @validate_quantities(validations_on_return={'units': [u.rad / u.s, u.Hz]})\n @angular_freq_to_hz\n def foo(x: u.rad / u.s) -> u.rad / u.s\n return x\n\n Adding ``u.Hz`` to the allowed units allows the converted\n quantity to pass the validations.\n\n Examples\n --------\n >>> import astropy.units as u\n >>> from plasmapy.utils.decorators.converter import angular_freq_to_hz\n >>>\n >>> @angular_freq_to_hz\n ... def foo(x):\n ... return x\n >>>\n >>> foo(5 * u.rad / u.s, to_hz=True)\n <Quantity 0.79577472 Hz>\n >>>\n >>> foo(-1 * u.rad / u.s, to_hz=True)\n <Quantity -0.15915494 Hz>\n\n Decoration also works with methods\n\n >>> class Foo:\n ... def __init__(self, x):\n ... self.x = x\n ...\n ... @angular_freq_to_hz\n ... def bar(self):\n ... return self.x\n >>>\n >>> foo = Foo(0.5 * u.rad / u.s)\n >>> foo.bar(to_hz=True)\n <Quantity 0.07957747 Hz>\n\n \"\"\"\n sig = inspect.signature(fn)\n if \"to_hz\" in sig.parameters:\n raise ValueError(\n f\"Wrapped function '{fn.__name__}' can not use keyword 'to_hz'.\"\n f\" Keyword reserved for decorator functionality.\"\n )\n\n # make new signature for fn\n new_params = []\n var_keyword_param = None\n for param in sig.parameters.values():\n if param.kind == param.VAR_KEYWORD:\n var_keyword_param = param\n else:\n new_params.append(param)\n\n new_params.append(\n inspect.Parameter(\"to_hz\", inspect.Parameter.KEYWORD_ONLY, default=False)\n )\n\n if var_keyword_param:\n new_params.append(var_keyword_param)\n\n new_sig = inspect.Signature(\n parameters=new_params, return_annotation=sig.return_annotation\n )\n fn.__signature__ = new_sig\n\n @wrapt.decorator\n def wrapper(fn, instance, args, kwargs): # noqa: ARG001\n to_hz = kwargs.pop(\"to_hz\", False)\n _result = fn(*args, **kwargs)\n if to_hz:\n return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])\n return _result\n\n fn = wrapper(fn)\n\n added_doc_bit = \"\"\"\n Other Parameters\n ----------------\n to_hz: bool\n Set `True` to convert function output from angular frequency to Hz\n \"\"\"\n if fn.__doc__ is not None:\n fn.__doc__ += added_doc_bit\n else:\n fn.__doc__ = added_doc_bit\n\n return fn\n", "path": "plasmapy/utils/decorators/converter.py"}]}
| 2,025 | 647 |
gh_patches_debug_3596
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-2170
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Identity spoofing via secondary email
See https://github.com/pennersr/django-allauth/issues/2265
cc: @CarolingerSeilchenspringer @MagdaN @fuzzylogic2000
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/users/adapters.py`
Content:
```
1 import re
2 from urllib.parse import quote
3
4 from allauth.account.adapter import DefaultAccountAdapter
5 from django.conf import settings
6 from django.utils.http import is_safe_url
7
8 from adhocracy4.emails.mixins import SyncEmailMixin
9 from meinberlin.apps.contrib.emails import Email
10 from meinberlin.apps.users import USERNAME_INVALID_MESSAGE
11 from meinberlin.apps.users import USERNAME_REGEX
12
13
14 class UserAccountEmail(SyncEmailMixin, Email):
15 def get_receivers(self):
16 return [self.object]
17
18 @property
19 def template_name(self):
20 return self.kwargs['template_name']
21
22 def get_context(self):
23 context = super().get_context()
24 context['contact_email'] = settings.CONTACT_EMAIL
25 return context
26
27
28 class AccountAdapter(DefaultAccountAdapter):
29 username_regex = re.compile(USERNAME_REGEX)
30 error_messages = dict(
31 DefaultAccountAdapter.error_messages,
32 invalid_username=USERNAME_INVALID_MESSAGE
33 )
34
35 def get_email_confirmation_url(self, request, emailconfirmation):
36 url = super().get_email_confirmation_url(request, emailconfirmation)
37 if 'next' in request.POST and is_safe_url(request.POST['next']):
38 return '{}?next={}'.format(url, quote(request.POST['next']))
39 else:
40 return url
41
42 def send_mail(self, template_prefix, email, context):
43 user = context['user']
44 return UserAccountEmail.send(
45 user,
46 template_name=template_prefix,
47 **context
48 )
49
50 def get_email_confirmation_redirect_url(self, request):
51 if 'next' in request.GET and is_safe_url(request.GET['next']):
52 return request.GET['next']
53 else:
54 return super().get_email_confirmation_redirect_url(request)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/users/adapters.py b/meinberlin/apps/users/adapters.py
--- a/meinberlin/apps/users/adapters.py
+++ b/meinberlin/apps/users/adapters.py
@@ -40,9 +40,8 @@
return url
def send_mail(self, template_prefix, email, context):
- user = context['user']
return UserAccountEmail.send(
- user,
+ email,
template_name=template_prefix,
**context
)
|
{"golden_diff": "diff --git a/meinberlin/apps/users/adapters.py b/meinberlin/apps/users/adapters.py\n--- a/meinberlin/apps/users/adapters.py\n+++ b/meinberlin/apps/users/adapters.py\n@@ -40,9 +40,8 @@\n return url\n \n def send_mail(self, template_prefix, email, context):\n- user = context['user']\n return UserAccountEmail.send(\n- user,\n+ email,\n template_name=template_prefix,\n **context\n )\n", "issue": "Identity spoofing via secondary email\nSee https://github.com/pennersr/django-allauth/issues/2265\r\n\r\ncc: @CarolingerSeilchenspringer @MagdaN @fuzzylogic2000 \n", "before_files": [{"content": "import re\nfrom urllib.parse import quote\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\n\nfrom adhocracy4.emails.mixins import SyncEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\nfrom meinberlin.apps.users import USERNAME_INVALID_MESSAGE\nfrom meinberlin.apps.users import USERNAME_REGEX\n\n\nclass UserAccountEmail(SyncEmailMixin, Email):\n def get_receivers(self):\n return [self.object]\n\n @property\n def template_name(self):\n return self.kwargs['template_name']\n\n def get_context(self):\n context = super().get_context()\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n\n\nclass AccountAdapter(DefaultAccountAdapter):\n username_regex = re.compile(USERNAME_REGEX)\n error_messages = dict(\n DefaultAccountAdapter.error_messages,\n invalid_username=USERNAME_INVALID_MESSAGE\n )\n\n def get_email_confirmation_url(self, request, emailconfirmation):\n url = super().get_email_confirmation_url(request, emailconfirmation)\n if 'next' in request.POST and is_safe_url(request.POST['next']):\n return '{}?next={}'.format(url, quote(request.POST['next']))\n else:\n return url\n\n def send_mail(self, template_prefix, email, context):\n user = context['user']\n return UserAccountEmail.send(\n user,\n template_name=template_prefix,\n **context\n )\n\n def get_email_confirmation_redirect_url(self, request):\n if 'next' in request.GET and is_safe_url(request.GET['next']):\n return request.GET['next']\n else:\n return super().get_email_confirmation_redirect_url(request)\n", "path": "meinberlin/apps/users/adapters.py"}], "after_files": [{"content": "import re\nfrom urllib.parse import quote\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\n\nfrom adhocracy4.emails.mixins import SyncEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\nfrom meinberlin.apps.users import USERNAME_INVALID_MESSAGE\nfrom meinberlin.apps.users import USERNAME_REGEX\n\n\nclass UserAccountEmail(SyncEmailMixin, Email):\n def get_receivers(self):\n return [self.object]\n\n @property\n def template_name(self):\n return self.kwargs['template_name']\n\n def get_context(self):\n context = super().get_context()\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n\n\nclass AccountAdapter(DefaultAccountAdapter):\n username_regex = re.compile(USERNAME_REGEX)\n error_messages = dict(\n DefaultAccountAdapter.error_messages,\n invalid_username=USERNAME_INVALID_MESSAGE\n )\n\n def get_email_confirmation_url(self, request, emailconfirmation):\n url = super().get_email_confirmation_url(request, emailconfirmation)\n if 'next' in request.POST and is_safe_url(request.POST['next']):\n return '{}?next={}'.format(url, quote(request.POST['next']))\n else:\n return url\n\n def send_mail(self, template_prefix, email, 
context):\n return UserAccountEmail.send(\n email,\n template_name=template_prefix,\n **context\n )\n\n def get_email_confirmation_redirect_url(self, request):\n if 'next' in request.GET and is_safe_url(request.GET['next']):\n return request.GET['next']\n else:\n return super().get_email_confirmation_redirect_url(request)\n", "path": "meinberlin/apps/users/adapters.py"}]}
| 778 | 113 |
gh_patches_debug_10828
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdeploy-700
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytorch2onnx fails with mmedit models
Error with the master branch:
```
TypeError: forward_dummy() got an unexpected keyword argument 'img_metas'
```
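For context, here is a self-contained sketch of the failure mode (the names `forward_dummy` and `data` are illustrative, not mmdeploy's actual API): mmedit models expose a dummy forward that accepts no `img_metas`, so passing that keyword unconditionally raises the `TypeError` above, while a simple presence check avoids it.

```python
def forward_dummy(img):  # mmedit-style entry point: no ``img_metas`` argument
    return img

data = {"img": "tensor"}  # mmedit pipelines produce no ``img_metas`` key

try:
    forward_dummy(data["img"], img_metas=data.get("img_metas"))
except TypeError as exc:
    print(exc)  # forward_dummy() got an unexpected keyword argument 'img_metas'

# Guarded call: only forward img_metas when the pipeline produced it.
if "img_metas" in data:
    out = forward_dummy(data["img"], img_metas=data["img_metas"])
else:
    out = forward_dummy(data["img"])
print(out)  # tensor
```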
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdeploy/apis/pytorch2onnx.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import os.path as osp
3 from typing import Any, Optional, Union
4
5 import mmcv
6 import torch
7
8 from mmdeploy.apis.core.pipeline_manager import no_mp
9 from mmdeploy.utils import (get_backend, get_dynamic_axes, get_input_shape,
10 get_onnx_config, load_config)
11 from .core import PIPELINE_MANAGER
12 from .onnx import export
13
14
15 @PIPELINE_MANAGER.register_pipeline()
16 def torch2onnx(img: Any,
17 work_dir: str,
18 save_file: str,
19 deploy_cfg: Union[str, mmcv.Config],
20 model_cfg: Union[str, mmcv.Config],
21 model_checkpoint: Optional[str] = None,
22 device: str = 'cuda:0'):
23 """Convert PyTorch model to ONNX model.
24
25 Examples:
26 >>> from mmdeploy.apis import torch2onnx
27 >>> img = 'demo.jpg'
28 >>> work_dir = 'work_dir'
29 >>> save_file = 'fcos.onnx'
30 >>> deploy_cfg = ('configs/mmdet/detection/'
31 'detection_onnxruntime_dynamic.py')
32 >>> model_cfg = ('mmdetection/configs/fcos/'
33 'fcos_r50_caffe_fpn_gn-head_1x_coco.py')
34 >>> model_checkpoint = ('checkpoints/'
35 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')
36 >>> device = 'cpu'
37 >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \
38 model_cfg, model_checkpoint, device)
39
40 Args:
41 img (str | np.ndarray | torch.Tensor): Input image used to assist
42 converting model.
43 work_dir (str): A working directory to save files.
44 save_file (str): Filename to save onnx model.
45 deploy_cfg (str | mmcv.Config): Deployment config file or
46 Config object.
47 model_cfg (str | mmcv.Config): Model config file or Config object.
48 model_checkpoint (str): A checkpoint path of PyTorch model,
49 defaults to `None`.
50 device (str): A string specifying device type, defaults to 'cuda:0'.
51 """
52 # load deploy_cfg if necessary
53 deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
54 mmcv.mkdir_or_exist(osp.abspath(work_dir))
55
56 input_shape = get_input_shape(deploy_cfg)
57
58 # create model an inputs
59 from mmdeploy.apis import build_task_processor
60 task_processor = build_task_processor(model_cfg, deploy_cfg, device)
61
62 torch_model = task_processor.init_pytorch_model(model_checkpoint)
63 data, model_inputs = task_processor.create_input(img, input_shape)
64 input_metas = dict(img_metas=data.get('img_metas', None))
65 if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
66 model_inputs = model_inputs[0]
67
68 # export to onnx
69 context_info = dict()
70 context_info['deploy_cfg'] = deploy_cfg
71 output_prefix = osp.join(work_dir,
72 osp.splitext(osp.basename(save_file))[0])
73 backend = get_backend(deploy_cfg).value
74
75 onnx_cfg = get_onnx_config(deploy_cfg)
76 opset_version = onnx_cfg.get('opset_version', 11)
77
78 input_names = onnx_cfg['input_names']
79 output_names = onnx_cfg['output_names']
80 axis_names = input_names + output_names
81 dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)
82 verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(
83 'verbose', False)
84 keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',
85 True)
86 optimize = onnx_cfg.get('optimize', False)
87 with no_mp():
88 export(
89 torch_model,
90 model_inputs,
91 input_metas=input_metas,
92 output_path_prefix=output_prefix,
93 backend=backend,
94 input_names=input_names,
95 output_names=output_names,
96 context_info=context_info,
97 opset_version=opset_version,
98 dynamic_axes=dynamic_axes,
99 verbose=verbose,
100 keep_initializers_as_inputs=keep_initializers_as_inputs,
101 optimize=optimize)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py
--- a/mmdeploy/apis/pytorch2onnx.py
+++ b/mmdeploy/apis/pytorch2onnx.py
@@ -61,7 +61,11 @@
torch_model = task_processor.init_pytorch_model(model_checkpoint)
data, model_inputs = task_processor.create_input(img, input_shape)
- input_metas = dict(img_metas=data.get('img_metas', None))
+ if 'img_metas' in data:
+ input_metas = dict(img_metas=data['img_metas'])
+ else:
+ # codebases like mmedit do not have img_metas argument
+ input_metas = None
if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
model_inputs = model_inputs[0]
|
{"golden_diff": "diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py\n--- a/mmdeploy/apis/pytorch2onnx.py\n+++ b/mmdeploy/apis/pytorch2onnx.py\n@@ -61,7 +61,11 @@\n \n torch_model = task_processor.init_pytorch_model(model_checkpoint)\n data, model_inputs = task_processor.create_input(img, input_shape)\n- input_metas = dict(img_metas=data.get('img_metas', None))\n+ if 'img_metas' in data:\n+ input_metas = dict(img_metas=data['img_metas'])\n+ else:\n+ # codebases like mmedit do not have img_metas argument\n+ input_metas = None\n if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:\n model_inputs = model_inputs[0]\n", "issue": "pytorch2onnx fails with mmedit models\nerror with master branch\r\n```\r\nTypeError: forward_dummy() got an unexpected keyword argument 'img_metas'\r\n```\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom typing import Any, Optional, Union\n\nimport mmcv\nimport torch\n\nfrom mmdeploy.apis.core.pipeline_manager import no_mp\nfrom mmdeploy.utils import (get_backend, get_dynamic_axes, get_input_shape,\n get_onnx_config, load_config)\nfrom .core import PIPELINE_MANAGER\nfrom .onnx import export\n\n\n@PIPELINE_MANAGER.register_pipeline()\ndef torch2onnx(img: Any,\n work_dir: str,\n save_file: str,\n deploy_cfg: Union[str, mmcv.Config],\n model_cfg: Union[str, mmcv.Config],\n model_checkpoint: Optional[str] = None,\n device: str = 'cuda:0'):\n \"\"\"Convert PyTorch model to ONNX model.\n\n Examples:\n >>> from mmdeploy.apis import torch2onnx\n >>> img = 'demo.jpg'\n >>> work_dir = 'work_dir'\n >>> save_file = 'fcos.onnx'\n >>> deploy_cfg = ('configs/mmdet/detection/'\n 'detection_onnxruntime_dynamic.py')\n >>> model_cfg = ('mmdetection/configs/fcos/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco.py')\n >>> model_checkpoint = ('checkpoints/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')\n >>> device = 'cpu'\n >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \\\n model_cfg, model_checkpoint, device)\n\n Args:\n img (str | np.ndarray | torch.Tensor): Input image used to assist\n converting model.\n work_dir (str): A working directory to save files.\n save_file (str): Filename to save onnx model.\n deploy_cfg (str | mmcv.Config): Deployment config file or\n Config object.\n model_cfg (str | mmcv.Config): Model config file or Config object.\n model_checkpoint (str): A checkpoint path of PyTorch model,\n defaults to `None`.\n device (str): A string specifying device type, defaults to 'cuda:0'.\n \"\"\"\n # load deploy_cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n mmcv.mkdir_or_exist(osp.abspath(work_dir))\n\n input_shape = get_input_shape(deploy_cfg)\n\n # create model an inputs\n from mmdeploy.apis import build_task_processor\n task_processor = build_task_processor(model_cfg, deploy_cfg, device)\n\n torch_model = task_processor.init_pytorch_model(model_checkpoint)\n data, model_inputs = task_processor.create_input(img, input_shape)\n input_metas = dict(img_metas=data.get('img_metas', None))\n if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:\n model_inputs = model_inputs[0]\n\n # export to onnx\n context_info = dict()\n context_info['deploy_cfg'] = deploy_cfg\n output_prefix = osp.join(work_dir,\n osp.splitext(osp.basename(save_file))[0])\n backend = get_backend(deploy_cfg).value\n\n onnx_cfg = get_onnx_config(deploy_cfg)\n opset_version = onnx_cfg.get('opset_version', 11)\n\n input_names = onnx_cfg['input_names']\n 
output_names = onnx_cfg['output_names']\n axis_names = input_names + output_names\n dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)\n verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(\n 'verbose', False)\n keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',\n True)\n optimize = onnx_cfg.get('optimize', False)\n with no_mp():\n export(\n torch_model,\n model_inputs,\n input_metas=input_metas,\n output_path_prefix=output_prefix,\n backend=backend,\n input_names=input_names,\n output_names=output_names,\n context_info=context_info,\n opset_version=opset_version,\n dynamic_axes=dynamic_axes,\n verbose=verbose,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n optimize=optimize)\n", "path": "mmdeploy/apis/pytorch2onnx.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom typing import Any, Optional, Union\n\nimport mmcv\nimport torch\n\nfrom mmdeploy.apis.core.pipeline_manager import no_mp\nfrom mmdeploy.utils import (get_backend, get_dynamic_axes, get_input_shape,\n get_onnx_config, load_config)\nfrom .core import PIPELINE_MANAGER\nfrom .onnx import export\n\n\n@PIPELINE_MANAGER.register_pipeline()\ndef torch2onnx(img: Any,\n work_dir: str,\n save_file: str,\n deploy_cfg: Union[str, mmcv.Config],\n model_cfg: Union[str, mmcv.Config],\n model_checkpoint: Optional[str] = None,\n device: str = 'cuda:0'):\n \"\"\"Convert PyTorch model to ONNX model.\n\n Examples:\n >>> from mmdeploy.apis import torch2onnx\n >>> img = 'demo.jpg'\n >>> work_dir = 'work_dir'\n >>> save_file = 'fcos.onnx'\n >>> deploy_cfg = ('configs/mmdet/detection/'\n 'detection_onnxruntime_dynamic.py')\n >>> model_cfg = ('mmdetection/configs/fcos/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco.py')\n >>> model_checkpoint = ('checkpoints/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')\n >>> device = 'cpu'\n >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \\\n model_cfg, model_checkpoint, device)\n\n Args:\n img (str | np.ndarray | torch.Tensor): Input image used to assist\n converting model.\n work_dir (str): A working directory to save files.\n save_file (str): Filename to save onnx model.\n deploy_cfg (str | mmcv.Config): Deployment config file or\n Config object.\n model_cfg (str | mmcv.Config): Model config file or Config object.\n model_checkpoint (str): A checkpoint path of PyTorch model,\n defaults to `None`.\n device (str): A string specifying device type, defaults to 'cuda:0'.\n \"\"\"\n # load deploy_cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n mmcv.mkdir_or_exist(osp.abspath(work_dir))\n\n input_shape = get_input_shape(deploy_cfg)\n\n # create model an inputs\n from mmdeploy.apis import build_task_processor\n task_processor = build_task_processor(model_cfg, deploy_cfg, device)\n\n torch_model = task_processor.init_pytorch_model(model_checkpoint)\n data, model_inputs = task_processor.create_input(img, input_shape)\n if 'img_metas' in data:\n input_metas = dict(img_metas=data['img_metas'])\n else:\n # codebases like mmedit do not have img_metas argument\n input_metas = None\n if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:\n model_inputs = model_inputs[0]\n\n # export to onnx\n context_info = dict()\n context_info['deploy_cfg'] = deploy_cfg\n output_prefix = osp.join(work_dir,\n osp.splitext(osp.basename(save_file))[0])\n backend = get_backend(deploy_cfg).value\n\n onnx_cfg = get_onnx_config(deploy_cfg)\n opset_version = 
onnx_cfg.get('opset_version', 11)\n\n input_names = onnx_cfg['input_names']\n output_names = onnx_cfg['output_names']\n axis_names = input_names + output_names\n dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)\n verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(\n 'verbose', False)\n keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',\n True)\n optimize = onnx_cfg.get('optimize', False)\n with no_mp():\n export(\n torch_model,\n model_inputs,\n input_metas=input_metas,\n output_path_prefix=output_prefix,\n backend=backend,\n input_names=input_names,\n output_names=output_names,\n context_info=context_info,\n opset_version=opset_version,\n dynamic_axes=dynamic_axes,\n verbose=verbose,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n optimize=optimize)\n", "path": "mmdeploy/apis/pytorch2onnx.py"}]}
| 1,420 | 193 |
gh_patches_debug_25470
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Monkey-patching `py.path.local.pyimport` should no longer be necessary
Hi
I noticed that **py** is used in conftest.py but not declared in any configuration files.
In addition, py is deprecated as a Python library; its [documentation](https://pypi.org/project/py/) states: "py.path: uniform local and svn path objects -> please use pathlib/pathlib2 instead".
Maybe it is necessary to migrate to a maintained replacement (pathlib/pathlib2) and add it to the configuration files.
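For reference, a rough mapping of the `py.path.local` idioms used in this conftest onto `pathlib` (a sketch only; the sample path is made up, and this is not the patch the maintainers necessarily shipped):

```python
from pathlib import Path

p = Path("tests/native_tests/example.hy")

# py.path.local equivalents used in pytest_collect_file:
# path.ext       -> p.suffix
# path.basename  -> p.name
# path.dirname   -> str(p.parent)
print(p.suffix == ".hy", p.name, p.parent)
```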
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conftest.py`
Content:
```
1 import sys
2 import os
3 import importlib
4 from operator import or_
5 from functools import reduce
6
7 import py
8 import pytest
9 import hy
10 from hy._compat import PY3_8, PY3_10
11
12 NATIVE_TESTS = os.path.join("", "tests", "native_tests", "")
13
14 _fspath_pyimport = py.path.local.pyimport
15
16 # https://github.com/hylang/hy/issues/2029
17 os.environ.pop("HYSTARTUP", None)
18
19
20 def pytest_ignore_collect(path, config):
21 versions = [
22 (sys.version_info < (3, 8), "sub_py3_7_only"),
23 (PY3_8, "py3_8_only"),
24 (PY3_10, "py3_10_only"),
25 ]
26
27 return reduce(
28 or_,
29 (name in path.basename and not condition for condition, name in versions),
30 ) or None
31
32
33 def pyimport_patch_mismatch(self, **kwargs):
34 """Lame fix for https://github.com/pytest-dev/py/issues/195"""
35 try:
36 return _fspath_pyimport(self, **kwargs)
37 except py.path.local.ImportMismatchError:
38 pkgpath = self.pypkgpath()
39 if pkgpath is None:
40 pkgroot = self.dirpath()
41 modname = self.purebasename
42 else:
43 pkgroot = pkgpath.dirpath()
44 names = self.new(ext="").relto(pkgroot).split(self.sep)
45 if names[-1] == "__init__":
46 names.pop()
47 modname = ".".join(names)
48
49 res = importlib.import_module(modname)
50
51 return res
52
53
54 py.path.local.pyimport = pyimport_patch_mismatch
55
56
57 def pytest_collect_file(parent, path):
58 if (path.ext == ".hy"
59 and NATIVE_TESTS in path.dirname + os.sep
60 and path.basename != "__init__.hy"):
61
62 if hasattr(pytest.Module, "from_parent"):
63 pytest_mod = pytest.Module.from_parent(parent, fspath=path)
64 else:
65 pytest_mod = pytest.Module(path, parent)
66 return pytest_mod
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -4,15 +4,12 @@
from operator import or_
from functools import reduce
-import py
import pytest
import hy
from hy._compat import PY3_8, PY3_10
NATIVE_TESTS = os.path.join("", "tests", "native_tests", "")
-_fspath_pyimport = py.path.local.pyimport
-
# https://github.com/hylang/hy/issues/2029
os.environ.pop("HYSTARTUP", None)
@@ -30,30 +27,6 @@
) or None
-def pyimport_patch_mismatch(self, **kwargs):
- """Lame fix for https://github.com/pytest-dev/py/issues/195"""
- try:
- return _fspath_pyimport(self, **kwargs)
- except py.path.local.ImportMismatchError:
- pkgpath = self.pypkgpath()
- if pkgpath is None:
- pkgroot = self.dirpath()
- modname = self.purebasename
- else:
- pkgroot = pkgpath.dirpath()
- names = self.new(ext="").relto(pkgroot).split(self.sep)
- if names[-1] == "__init__":
- names.pop()
- modname = ".".join(names)
-
- res = importlib.import_module(modname)
-
- return res
-
-
-py.path.local.pyimport = pyimport_patch_mismatch
-
-
def pytest_collect_file(parent, path):
if (path.ext == ".hy"
and NATIVE_TESTS in path.dirname + os.sep
|
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -4,15 +4,12 @@\n from operator import or_\n from functools import reduce\n \n-import py\n import pytest\n import hy\n from hy._compat import PY3_8, PY3_10\n \n NATIVE_TESTS = os.path.join(\"\", \"tests\", \"native_tests\", \"\")\n \n-_fspath_pyimport = py.path.local.pyimport\n-\n # https://github.com/hylang/hy/issues/2029\n os.environ.pop(\"HYSTARTUP\", None)\n \n@@ -30,30 +27,6 @@\n ) or None\n \n \n-def pyimport_patch_mismatch(self, **kwargs):\n- \"\"\"Lame fix for https://github.com/pytest-dev/py/issues/195\"\"\"\n- try:\n- return _fspath_pyimport(self, **kwargs)\n- except py.path.local.ImportMismatchError:\n- pkgpath = self.pypkgpath()\n- if pkgpath is None:\n- pkgroot = self.dirpath()\n- modname = self.purebasename\n- else:\n- pkgroot = pkgpath.dirpath()\n- names = self.new(ext=\"\").relto(pkgroot).split(self.sep)\n- if names[-1] == \"__init__\":\n- names.pop()\n- modname = \".\".join(names)\n-\n- res = importlib.import_module(modname)\n-\n- return res\n-\n-\n-py.path.local.pyimport = pyimport_patch_mismatch\n-\n-\n def pytest_collect_file(parent, path):\n if (path.ext == \".hy\"\n and NATIVE_TESTS in path.dirname + os.sep\n", "issue": "Monkey-patching `py.path.local.pyimport` should no longer be necessary\nHi\r\nI noticed **py** is used in conftest.py but not declared in any configuration files .\r\nIn addition, py as a Python library is deprecated as its [documentation](https://pypi.org/project/py/) \"py.path: uniform local and svn path objects -> please use pathlib/pathlib2 instead\"\r\n\r\nMaybe it is necessary to migrate to new dependency-pathlib2 and add it to configuration files.\n", "before_files": [{"content": "import sys\nimport os\nimport importlib\nfrom operator import or_\nfrom functools import reduce\n\nimport py\nimport pytest\nimport hy\nfrom hy._compat import PY3_8, PY3_10\n\nNATIVE_TESTS = os.path.join(\"\", \"tests\", \"native_tests\", \"\")\n\n_fspath_pyimport = py.path.local.pyimport\n\n# https://github.com/hylang/hy/issues/2029\nos.environ.pop(\"HYSTARTUP\", None)\n\n\ndef pytest_ignore_collect(path, config):\n versions = [\n (sys.version_info < (3, 8), \"sub_py3_7_only\"),\n (PY3_8, \"py3_8_only\"),\n (PY3_10, \"py3_10_only\"),\n ]\n\n return reduce(\n or_,\n (name in path.basename and not condition for condition, name in versions),\n ) or None\n\n\ndef pyimport_patch_mismatch(self, **kwargs):\n \"\"\"Lame fix for https://github.com/pytest-dev/py/issues/195\"\"\"\n try:\n return _fspath_pyimport(self, **kwargs)\n except py.path.local.ImportMismatchError:\n pkgpath = self.pypkgpath()\n if pkgpath is None:\n pkgroot = self.dirpath()\n modname = self.purebasename\n else:\n pkgroot = pkgpath.dirpath()\n names = self.new(ext=\"\").relto(pkgroot).split(self.sep)\n if names[-1] == \"__init__\":\n names.pop()\n modname = \".\".join(names)\n\n res = importlib.import_module(modname)\n\n return res\n\n\npy.path.local.pyimport = pyimport_patch_mismatch\n\n\ndef pytest_collect_file(parent, path):\n if (path.ext == \".hy\"\n and NATIVE_TESTS in path.dirname + os.sep\n and path.basename != \"__init__.hy\"):\n\n if hasattr(pytest.Module, \"from_parent\"):\n pytest_mod = pytest.Module.from_parent(parent, fspath=path)\n else:\n pytest_mod = pytest.Module(path, parent)\n return pytest_mod\n", "path": "conftest.py"}], "after_files": [{"content": "import sys\nimport os\nimport importlib\nfrom operator import or_\nfrom functools import reduce\n\nimport pytest\nimport hy\nfrom hy._compat 
import PY3_8, PY3_10\n\nNATIVE_TESTS = os.path.join(\"\", \"tests\", \"native_tests\", \"\")\n\n# https://github.com/hylang/hy/issues/2029\nos.environ.pop(\"HYSTARTUP\", None)\n\n\ndef pytest_ignore_collect(path, config):\n versions = [\n (sys.version_info < (3, 8), \"sub_py3_7_only\"),\n (PY3_8, \"py3_8_only\"),\n (PY3_10, \"py3_10_only\"),\n ]\n\n return reduce(\n or_,\n (name in path.basename and not condition for condition, name in versions),\n ) or None\n\n\ndef pytest_collect_file(parent, path):\n if (path.ext == \".hy\"\n and NATIVE_TESTS in path.dirname + os.sep\n and path.basename != \"__init__.hy\"):\n\n if hasattr(pytest.Module, \"from_parent\"):\n pytest_mod = pytest.Module.from_parent(parent, fspath=path)\n else:\n pytest_mod = pytest.Module(path, parent)\n return pytest_mod\n", "path": "conftest.py"}]}
| 939 | 371 |
gh_patches_debug_36132
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1823
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning: the imp module is deprecated in favour of importlib
When running a Django project using ddtrace with [warnings enabled](https://docs.python.org/3/using/cmdline.html#cmdoption-w), this warning is emitted:
## Issue
> `/usr/local/lib/python3.7/dist-packages/ddtrace/bootstrap/sitecustomize.py:7`: `DeprecationWarning`: the `imp` module is deprecated in favour of `importlib`; see the [module's documentation](https://docs.python.org/3/library/imp.html) for alternative uses
## Details
The line in question:
https://github.com/DataDog/dd-trace-py/blob/94148324196eb41c1f6bef56be51bdd96c758fa7/ddtrace/bootstrap/sitecustomize.py#L7
How it's used:
https://github.com/DataDog/dd-trace-py/blob/94148324196eb41c1f6bef56be51bdd96c758fa7/ddtrace/bootstrap/sitecustomize.py#L103-L120
Documentation note for [`imp.find_module()`](https://docs.python.org/3/library/imp.html#imp.find_module):
> Deprecated since version 3.3: Use `importlib.util.find_spec()` instead unless Python 3.3 compatibility is required, in which case use `importlib.find_loader()`. For example usage of the former case, see the Examples section of the `importlib` documentation.
Documentation note for [`imp.load_module()`](https://docs.python.org/3/library/imp.html#imp.load_module):
> Deprecated since version 3.3: If previously used in conjunction with `imp.find_module()` then consider using `importlib.import_module()`, otherwise use the loader returned by the replacement you chose for `imp.find_module()`. If you called `imp.load_module()` and related functions directly with file path arguments then use a combination of `importlib.util.spec_from_file_location()` and `importlib.util.module_from_spec()`. See the Examples section of the `importlib` documentation for details of the various approaches.
## Resolution
I suspect [this example](https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module) could be worth building off of to do the necessary path customization.
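One possible `importlib`-based shape for the lookup (a sketch only, not necessarily the fix the maintainers will ship; `load_user_sitecustomize` is a hypothetical helper, and `path` is assumed to be the search list with `bootstrap_dir` already removed):

```python
import importlib.machinery
import importlib.util
import sys

def load_user_sitecustomize(path):
    # Search only the given path entries, mirroring imp.find_module("sitecustomize", path).
    spec = importlib.machinery.PathFinder.find_spec("sitecustomize", path)
    if spec is None:
        return None  # no user sitecustomize found
    module = importlib.util.module_from_spec(spec)
    sys.modules["sitecustomize"] = module
    spec.loader.exec_module(module)
    return module

# Example usage: load_user_sitecustomize([p for p in sys.path if p != bootstrap_dir])
```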
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/bootstrap/sitecustomize.py`
Content:
```
1 """
2 Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint
3 Add all monkey-patching that needs to run by default here
4 """
5 import logging
6 import os
7 import imp
8 import sys
9
10 from ddtrace.utils.formats import asbool, get_env, parse_tags_str
11 from ddtrace.internal.logger import get_logger
12 from ddtrace import config, constants
13 from ddtrace.tracer import debug_mode, DD_LOG_FORMAT
14
15
16 if config.logs_injection:
17 # immediately patch logging if trace id injected
18 from ddtrace import patch
19
20 patch(logging=True)
21
22
23 # DEV: Once basicConfig is called here, future calls to it cannot be used to
24 # change the formatter since it applies the formatter to the root handler only
25 # upon initializing it the first time.
26 # See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
27 # Debug mode from the tracer will do a basicConfig so only need to do this otherwise
28 if not debug_mode:
29 if config.logs_injection:
30 logging.basicConfig(format=DD_LOG_FORMAT)
31 else:
32 logging.basicConfig()
33
34 log = get_logger(__name__)
35
36 EXTRA_PATCHED_MODULES = {
37 "bottle": True,
38 "django": True,
39 "falcon": True,
40 "flask": True,
41 "pylons": True,
42 "pyramid": True,
43 }
44
45
46 def update_patched_modules():
47 modules_to_patch = os.environ.get("DATADOG_PATCH_MODULES")
48 if not modules_to_patch:
49 return
50
51 modules = parse_tags_str(modules_to_patch)
52 for module, should_patch in modules.items():
53 EXTRA_PATCHED_MODULES[module] = asbool(should_patch)
54
55
56 try:
57 from ddtrace import tracer
58
59 # Respect DATADOG_* environment variables in global tracer configuration
60 # TODO: these variables are deprecated; use utils method and update our documentation
61 # correct prefix should be DD_*
62 hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME"))
63 port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
64 priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")
65 profiling = asbool(os.environ.get("DD_PROFILING_ENABLED", False))
66
67 if profiling:
68 import ddtrace.profiling.auto # noqa: F401
69
70 opts = {}
71
72 if asbool(os.environ.get("DATADOG_TRACE_ENABLED", True)):
73 patch = True
74 else:
75 patch = False
76 opts["enabled"] = False
77
78 if hostname:
79 opts["hostname"] = hostname
80 if port:
81 opts["port"] = int(port)
82 if priority_sampling:
83 opts["priority_sampling"] = asbool(priority_sampling)
84
85 opts["collect_metrics"] = asbool(get_env("runtime_metrics", "enabled"))
86
87 if opts:
88 tracer.configure(**opts)
89
90 if patch:
91 update_patched_modules()
92 from ddtrace import patch_all
93
94 patch_all(**EXTRA_PATCHED_MODULES)
95
96 if "DATADOG_ENV" in os.environ:
97 tracer.set_tags({constants.ENV_KEY: os.environ["DATADOG_ENV"]})
98
99 if "DD_TRACE_GLOBAL_TAGS" in os.environ:
100 env_tags = os.getenv("DD_TRACE_GLOBAL_TAGS")
101 tracer.set_tags(parse_tags_str(env_tags))
102
103 # Ensure sitecustomize.py is properly called if available in application directories:
104 # * exclude `bootstrap_dir` from the search
105 # * find a user `sitecustomize.py` module
106 # * import that module via `imp`
107 bootstrap_dir = os.path.dirname(__file__)
108 path = list(sys.path)
109
110 if bootstrap_dir in path:
111 path.remove(bootstrap_dir)
112
113 try:
114 (f, path, description) = imp.find_module("sitecustomize", path)
115 except ImportError:
116 pass
117 else:
118 # `sitecustomize.py` found, load it
119 log.debug("sitecustomize from user found in: %s", path)
120 imp.load_module("sitecustomize", f, path, description)
121
122 # Loading status used in tests to detect if the `sitecustomize` has been
123 # properly loaded without exceptions. This must be the last action in the module
124 # when the execution ends with a success.
125 loaded = True
126 except Exception:
127 loaded = False
128 log.warning("error configuring Datadog tracing", exc_info=True)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/bootstrap/sitecustomize.py b/ddtrace/bootstrap/sitecustomize.py
--- a/ddtrace/bootstrap/sitecustomize.py
+++ b/ddtrace/bootstrap/sitecustomize.py
@@ -4,7 +4,6 @@
"""
import logging
import os
-import imp
import sys
from ddtrace.utils.formats import asbool, get_env, parse_tags_str
@@ -100,24 +99,40 @@
env_tags = os.getenv("DD_TRACE_GLOBAL_TAGS")
tracer.set_tags(parse_tags_str(env_tags))
- # Ensure sitecustomize.py is properly called if available in application directories:
- # * exclude `bootstrap_dir` from the search
- # * find a user `sitecustomize.py` module
- # * import that module via `imp`
+ # Check for and import any sitecustomize that would have normally been used
+ # had ddtrace-run not been used.
bootstrap_dir = os.path.dirname(__file__)
- path = list(sys.path)
-
- if bootstrap_dir in path:
- path.remove(bootstrap_dir)
-
- try:
- (f, path, description) = imp.find_module("sitecustomize", path)
- except ImportError:
- pass
+ if bootstrap_dir in sys.path:
+ index = sys.path.index(bootstrap_dir)
+ del sys.path[index]
+
+ # NOTE: this reference to the module is crucial in Python 2.
+ # Without it the current module gets gc'd and all subsequent references
+ # will be `None`.
+ ddtrace_sitecustomize = sys.modules["sitecustomize"]
+ del sys.modules["sitecustomize"]
+ try:
+ import sitecustomize # noqa
+ except ImportError:
+ # If an additional sitecustomize is not found then put the ddtrace
+ # sitecustomize back.
+ log.debug("additional sitecustomize not found")
+ sys.modules["sitecustomize"] = ddtrace_sitecustomize
+ else:
+ log.debug("additional sitecustomize found in: %s", sys.path)
+ finally:
+ # Always reinsert the ddtrace bootstrap directory to the path so
+ # that introspection and debugging the application makes sense.
+ # Note that this does not interfere with imports since a user
+ # sitecustomize, if it exists, will be imported.
+ sys.path.insert(index, bootstrap_dir)
else:
- # `sitecustomize.py` found, load it
- log.debug("sitecustomize from user found in: %s", path)
- imp.load_module("sitecustomize", f, path, description)
+ try:
+ import sitecustomize # noqa
+ except ImportError:
+ log.debug("additional sitecustomize not found")
+ else:
+ log.debug("additional sitecustomize found in: %s", sys.path)
# Loading status used in tests to detect if the `sitecustomize` has been
# properly loaded without exceptions. This must be the last action in the module
|
{"golden_diff": "diff --git a/ddtrace/bootstrap/sitecustomize.py b/ddtrace/bootstrap/sitecustomize.py\n--- a/ddtrace/bootstrap/sitecustomize.py\n+++ b/ddtrace/bootstrap/sitecustomize.py\n@@ -4,7 +4,6 @@\n \"\"\"\n import logging\n import os\n-import imp\n import sys\n \n from ddtrace.utils.formats import asbool, get_env, parse_tags_str\n@@ -100,24 +99,40 @@\n env_tags = os.getenv(\"DD_TRACE_GLOBAL_TAGS\")\n tracer.set_tags(parse_tags_str(env_tags))\n \n- # Ensure sitecustomize.py is properly called if available in application directories:\n- # * exclude `bootstrap_dir` from the search\n- # * find a user `sitecustomize.py` module\n- # * import that module via `imp`\n+ # Check for and import any sitecustomize that would have normally been used\n+ # had ddtrace-run not been used.\n bootstrap_dir = os.path.dirname(__file__)\n- path = list(sys.path)\n-\n- if bootstrap_dir in path:\n- path.remove(bootstrap_dir)\n-\n- try:\n- (f, path, description) = imp.find_module(\"sitecustomize\", path)\n- except ImportError:\n- pass\n+ if bootstrap_dir in sys.path:\n+ index = sys.path.index(bootstrap_dir)\n+ del sys.path[index]\n+\n+ # NOTE: this reference to the module is crucial in Python 2.\n+ # Without it the current module gets gc'd and all subsequent references\n+ # will be `None`.\n+ ddtrace_sitecustomize = sys.modules[\"sitecustomize\"]\n+ del sys.modules[\"sitecustomize\"]\n+ try:\n+ import sitecustomize # noqa\n+ except ImportError:\n+ # If an additional sitecustomize is not found then put the ddtrace\n+ # sitecustomize back.\n+ log.debug(\"additional sitecustomize not found\")\n+ sys.modules[\"sitecustomize\"] = ddtrace_sitecustomize\n+ else:\n+ log.debug(\"additional sitecustomize found in: %s\", sys.path)\n+ finally:\n+ # Always reinsert the ddtrace bootstrap directory to the path so\n+ # that introspection and debugging the application makes sense.\n+ # Note that this does not interfere with imports since a user\n+ # sitecustomize, if it exists, will be imported.\n+ sys.path.insert(index, bootstrap_dir)\n else:\n- # `sitecustomize.py` found, load it\n- log.debug(\"sitecustomize from user found in: %s\", path)\n- imp.load_module(\"sitecustomize\", f, path, description)\n+ try:\n+ import sitecustomize # noqa\n+ except ImportError:\n+ log.debug(\"additional sitecustomize not found\")\n+ else:\n+ log.debug(\"additional sitecustomize found in: %s\", sys.path)\n \n # Loading status used in tests to detect if the `sitecustomize` has been\n # properly loaded without exceptions. 
This must be the last action in the module\n", "issue": "DeprecationWarning: the imp module is deprecated in favour of importlib\nWhen running a Django project using ddtrace with [warnings enabled](https://docs.python.org/3/using/cmdline.html#cmdoption-w), this warning is emitted:\r\n\r\n## Issue\r\n\r\n> `/usr/local/lib/python3.7/dist-packages/ddtrace/bootstrap/sitecustomize.py:7`: `DeprecationWarning`: the `imp` module is deprecated in favour of `importlib`; see the [module's documentation](https://docs.python.org/3/library/imp.html) for alternative uses\r\n\r\n## Details\r\n\r\nThe line in question:\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/94148324196eb41c1f6bef56be51bdd96c758fa7/ddtrace/bootstrap/sitecustomize.py#L7\r\n\r\nHow it's used: \r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/94148324196eb41c1f6bef56be51bdd96c758fa7/ddtrace/bootstrap/sitecustomize.py#L103-L120\r\n\r\nDocumentation note for [`imp.find_module()`](https://docs.python.org/3/library/imp.html#imp.find_module):\r\n\r\n> Deprecated since version 3.3: Use `importlib.util.find_spec()` instead unless Python 3.3 compatibility is required, in which case use `importlib.find_loader()`. For example usage of the former case, see the Examples section of the `importlib` documentation.\r\n\r\nDocumentation note for [`imp.load_module()`](https://docs.python.org/3/library/imp.html#imp.load_module):\r\n\r\n> Deprecated since version 3.3: If previously used in conjunction with `imp.find_module()` then consider using `importlib.import_module()`, otherwise use the loader returned by the replacement you chose for `imp.find_module()`. If you called `imp.load_module()` and related functions directly with file path arguments then use a combination of `importlib.util.spec_from_file_location()` and `importlib.util.module_from_spec()`. 
See the Examples section of the `importlib` documentation for details of the various approaches.\r\n\r\n## Resolution\r\n\r\nI suspect [this example](https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module) could be worth building off of to do the necessary path customization.\n", "before_files": [{"content": "\"\"\"\nBootstrapping code that is run when using the `ddtrace-run` Python entrypoint\nAdd all monkey-patching that needs to run by default here\n\"\"\"\nimport logging\nimport os\nimport imp\nimport sys\n\nfrom ddtrace.utils.formats import asbool, get_env, parse_tags_str\nfrom ddtrace.internal.logger import get_logger\nfrom ddtrace import config, constants\nfrom ddtrace.tracer import debug_mode, DD_LOG_FORMAT\n\n\nif config.logs_injection:\n # immediately patch logging if trace id injected\n from ddtrace import patch\n\n patch(logging=True)\n\n\n# DEV: Once basicConfig is called here, future calls to it cannot be used to\n# change the formatter since it applies the formatter to the root handler only\n# upon initializing it the first time.\n# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550\n# Debug mode from the tracer will do a basicConfig so only need to do this otherwise\nif not debug_mode:\n if config.logs_injection:\n logging.basicConfig(format=DD_LOG_FORMAT)\n else:\n logging.basicConfig()\n\nlog = get_logger(__name__)\n\nEXTRA_PATCHED_MODULES = {\n \"bottle\": True,\n \"django\": True,\n \"falcon\": True,\n \"flask\": True,\n \"pylons\": True,\n \"pyramid\": True,\n}\n\n\ndef update_patched_modules():\n modules_to_patch = os.environ.get(\"DATADOG_PATCH_MODULES\")\n if not modules_to_patch:\n return\n\n modules = parse_tags_str(modules_to_patch)\n for module, should_patch in modules.items():\n EXTRA_PATCHED_MODULES[module] = asbool(should_patch)\n\n\ntry:\n from ddtrace import tracer\n\n # Respect DATADOG_* environment variables in global tracer configuration\n # TODO: these variables are deprecated; use utils method and update our documentation\n # correct prefix should be DD_*\n hostname = os.environ.get(\"DD_AGENT_HOST\", os.environ.get(\"DATADOG_TRACE_AGENT_HOSTNAME\"))\n port = os.environ.get(\"DATADOG_TRACE_AGENT_PORT\")\n priority_sampling = os.environ.get(\"DATADOG_PRIORITY_SAMPLING\")\n profiling = asbool(os.environ.get(\"DD_PROFILING_ENABLED\", False))\n\n if profiling:\n import ddtrace.profiling.auto # noqa: F401\n\n opts = {}\n\n if asbool(os.environ.get(\"DATADOG_TRACE_ENABLED\", True)):\n patch = True\n else:\n patch = False\n opts[\"enabled\"] = False\n\n if hostname:\n opts[\"hostname\"] = hostname\n if port:\n opts[\"port\"] = int(port)\n if priority_sampling:\n opts[\"priority_sampling\"] = asbool(priority_sampling)\n\n opts[\"collect_metrics\"] = asbool(get_env(\"runtime_metrics\", \"enabled\"))\n\n if opts:\n tracer.configure(**opts)\n\n if patch:\n update_patched_modules()\n from ddtrace import patch_all\n\n patch_all(**EXTRA_PATCHED_MODULES)\n\n if \"DATADOG_ENV\" in os.environ:\n tracer.set_tags({constants.ENV_KEY: os.environ[\"DATADOG_ENV\"]})\n\n if \"DD_TRACE_GLOBAL_TAGS\" in os.environ:\n env_tags = os.getenv(\"DD_TRACE_GLOBAL_TAGS\")\n tracer.set_tags(parse_tags_str(env_tags))\n\n # Ensure sitecustomize.py is properly called if available in application directories:\n # * exclude `bootstrap_dir` from the search\n # * find a user `sitecustomize.py` module\n # * import that module via `imp`\n bootstrap_dir = os.path.dirname(__file__)\n path = 
list(sys.path)\n\n if bootstrap_dir in path:\n path.remove(bootstrap_dir)\n\n try:\n (f, path, description) = imp.find_module(\"sitecustomize\", path)\n except ImportError:\n pass\n else:\n # `sitecustomize.py` found, load it\n log.debug(\"sitecustomize from user found in: %s\", path)\n imp.load_module(\"sitecustomize\", f, path, description)\n\n # Loading status used in tests to detect if the `sitecustomize` has been\n # properly loaded without exceptions. This must be the last action in the module\n # when the execution ends with a success.\n loaded = True\nexcept Exception:\n loaded = False\n log.warning(\"error configuring Datadog tracing\", exc_info=True)\n", "path": "ddtrace/bootstrap/sitecustomize.py"}], "after_files": [{"content": "\"\"\"\nBootstrapping code that is run when using the `ddtrace-run` Python entrypoint\nAdd all monkey-patching that needs to run by default here\n\"\"\"\nimport logging\nimport os\nimport sys\n\nfrom ddtrace.utils.formats import asbool, get_env, parse_tags_str\nfrom ddtrace.internal.logger import get_logger\nfrom ddtrace import config, constants\nfrom ddtrace.tracer import debug_mode, DD_LOG_FORMAT\n\n\nif config.logs_injection:\n # immediately patch logging if trace id injected\n from ddtrace import patch\n\n patch(logging=True)\n\n\n# DEV: Once basicConfig is called here, future calls to it cannot be used to\n# change the formatter since it applies the formatter to the root handler only\n# upon initializing it the first time.\n# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550\n# Debug mode from the tracer will do a basicConfig so only need to do this otherwise\nif not debug_mode:\n if config.logs_injection:\n logging.basicConfig(format=DD_LOG_FORMAT)\n else:\n logging.basicConfig()\n\nlog = get_logger(__name__)\n\nEXTRA_PATCHED_MODULES = {\n \"bottle\": True,\n \"django\": True,\n \"falcon\": True,\n \"flask\": True,\n \"pylons\": True,\n \"pyramid\": True,\n}\n\n\ndef update_patched_modules():\n modules_to_patch = os.environ.get(\"DATADOG_PATCH_MODULES\")\n if not modules_to_patch:\n return\n\n modules = parse_tags_str(modules_to_patch)\n for module, should_patch in modules.items():\n EXTRA_PATCHED_MODULES[module] = asbool(should_patch)\n\n\ntry:\n from ddtrace import tracer\n\n # Respect DATADOG_* environment variables in global tracer configuration\n # TODO: these variables are deprecated; use utils method and update our documentation\n # correct prefix should be DD_*\n hostname = os.environ.get(\"DD_AGENT_HOST\", os.environ.get(\"DATADOG_TRACE_AGENT_HOSTNAME\"))\n port = os.environ.get(\"DATADOG_TRACE_AGENT_PORT\")\n priority_sampling = os.environ.get(\"DATADOG_PRIORITY_SAMPLING\")\n profiling = asbool(os.environ.get(\"DD_PROFILING_ENABLED\", False))\n\n if profiling:\n import ddtrace.profiling.auto # noqa: F401\n\n opts = {}\n\n if asbool(os.environ.get(\"DATADOG_TRACE_ENABLED\", True)):\n patch = True\n else:\n patch = False\n opts[\"enabled\"] = False\n\n if hostname:\n opts[\"hostname\"] = hostname\n if port:\n opts[\"port\"] = int(port)\n if priority_sampling:\n opts[\"priority_sampling\"] = asbool(priority_sampling)\n\n opts[\"collect_metrics\"] = asbool(get_env(\"runtime_metrics\", \"enabled\"))\n\n if opts:\n tracer.configure(**opts)\n\n if patch:\n update_patched_modules()\n from ddtrace import patch_all\n\n patch_all(**EXTRA_PATCHED_MODULES)\n\n if \"DATADOG_ENV\" in os.environ:\n tracer.set_tags({constants.ENV_KEY: os.environ[\"DATADOG_ENV\"]})\n\n if 
\"DD_TRACE_GLOBAL_TAGS\" in os.environ:\n env_tags = os.getenv(\"DD_TRACE_GLOBAL_TAGS\")\n tracer.set_tags(parse_tags_str(env_tags))\n\n # Check for and import any sitecustomize that would have normally been used\n # had ddtrace-run not been used.\n bootstrap_dir = os.path.dirname(__file__)\n if bootstrap_dir in sys.path:\n index = sys.path.index(bootstrap_dir)\n del sys.path[index]\n\n # NOTE: this reference to the module is crucial in Python 2.\n # Without it the current module gets gc'd and all subsequent references\n # will be `None`.\n ddtrace_sitecustomize = sys.modules[\"sitecustomize\"]\n del sys.modules[\"sitecustomize\"]\n try:\n import sitecustomize # noqa\n except ImportError:\n # If an additional sitecustomize is not found then put the ddtrace\n # sitecustomize back.\n log.debug(\"additional sitecustomize not found\")\n sys.modules[\"sitecustomize\"] = ddtrace_sitecustomize\n else:\n log.debug(\"additional sitecustomize found in: %s\", sys.path)\n finally:\n # Always reinsert the ddtrace bootstrap directory to the path so\n # that introspection and debugging the application makes sense.\n # Note that this does not interfere with imports since a user\n # sitecustomize, if it exists, will be imported.\n sys.path.insert(index, bootstrap_dir)\n else:\n try:\n import sitecustomize # noqa\n except ImportError:\n log.debug(\"additional sitecustomize not found\")\n else:\n log.debug(\"additional sitecustomize found in: %s\", sys.path)\n\n # Loading status used in tests to detect if the `sitecustomize` has been\n # properly loaded without exceptions. This must be the last action in the module\n # when the execution ends with a success.\n loaded = True\nexcept Exception:\n loaded = False\n log.warning(\"error configuring Datadog tracing\", exc_info=True)\n", "path": "ddtrace/bootstrap/sitecustomize.py"}]}
| 2,030 | 649 |
gh_patches_debug_9777
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-1397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
simplelistadapter should accept objects inheriting from list or tuple
I'd find it useful if it were possible to extend the list object that I pass to the SimpleListAdapter, but an exception is raised.
Reproduce:
``` python
from kivy.adapters.simplelistadapter import SimpleListAdapter
class ExtendedList(list):
pass
list_adapter = SimpleListAdapter(data=ExtendedList())
```
A solution:
In kivy/adapters/simplelistadapter.py
``` python
47 if type(kwargs['data']) not in (tuple, list):
48 raise Exception('list adapter: data must be a tuple or list')
```
May be replaced by:
``` python
if not isinstance(kwargs['data'], list) and not isinstance(kwargs['data'], tuple):
```
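For illustration, a quick standalone check (hypothetical snippet, not repository code) of why the `type(...)` test rejects subclasses while `isinstance(...)` accepts them:
```python
class ExtendedList(list):
    pass

data = ExtendedList()

print(type(data) in (tuple, list))      # False: type() ignores inheritance
print(isinstance(data, (list, tuple)))  # True: isinstance() follows subclassing
```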
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/adapters/simplelistadapter.py`
Content:
```
1 '''
2 SimpleListAdapter
3 =================
4
5 .. versionadded:: 1.5
6
7 .. warning::
8
9 This code is still experimental, and its API is subject to change in a
10 future version.
11
12 The :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for
13 basic lists. For example, it can be used for displaying a list of read-only
14 strings that do not require user interaction.
15
16 '''
17
18 __all__ = ('SimpleListAdapter', )
19
20 from kivy.adapters.adapter import Adapter
21 from kivy.properties import ListProperty
22 from kivy.lang import Builder
23
24
25 class SimpleListAdapter(Adapter):
26 '''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an
27 adapter around a Python list.
28
29 From :class:`~kivy.adapters.adapter.Adapter`, the
30 :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,
31 and args_converter properties.
32 '''
33
34 data = ListProperty([])
35 '''The data list property contains a list of objects (which can be strings)
36 that will be used directly if no args_converter function is provided. If
37 there is an args_converter, the data objects will be passed to it for
38 instantiating the item view class instances.
39
40 :data:`data` is a :class:`~kivy.properties.ListProperty` and
41 defaults to [].
42 '''
43
44 def __init__(self, **kwargs):
45 if 'data' not in kwargs:
46 raise Exception('list adapter: input must include data argument')
47 if type(kwargs['data']) not in (tuple, list):
48 raise Exception('list adapter: data must be a tuple or list')
49 super(SimpleListAdapter, self).__init__(**kwargs)
50
51 def get_count(self):
52 return len(self.data)
53
54 def get_data_item(self, index):
55 if index < 0 or index >= len(self.data):
56 return None
57 return self.data[index]
58
59 # Returns a view instance for an item.
60 def get_view(self, index):
61 item = self.get_data_item(index)
62
63 if item is None:
64 return None
65
66 item_args = self.args_converter(index, item)
67
68 if self.cls:
69 instance = self.cls(**item_args)
70 return instance
71 else:
72 return Builder.template(self.template, **item_args)
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/adapters/simplelistadapter.py b/kivy/adapters/simplelistadapter.py
--- a/kivy/adapters/simplelistadapter.py
+++ b/kivy/adapters/simplelistadapter.py
@@ -44,7 +44,8 @@
def __init__(self, **kwargs):
if 'data' not in kwargs:
raise Exception('list adapter: input must include data argument')
- if type(kwargs['data']) not in (tuple, list):
+ if not isinstance(kwargs['data'], list) and \
+ not isinstance(kwargs['data'], tuple):
raise Exception('list adapter: data must be a tuple or list')
super(SimpleListAdapter, self).__init__(**kwargs)
|
{"golden_diff": "diff --git a/kivy/adapters/simplelistadapter.py b/kivy/adapters/simplelistadapter.py\n--- a/kivy/adapters/simplelistadapter.py\n+++ b/kivy/adapters/simplelistadapter.py\n@@ -44,7 +44,8 @@\n def __init__(self, **kwargs):\n if 'data' not in kwargs:\n raise Exception('list adapter: input must include data argument')\n- if type(kwargs['data']) not in (tuple, list):\n+ if not isinstance(kwargs['data'], list) and \\\n+ not isinstance(kwargs['data'], tuple):\n raise Exception('list adapter: data must be a tuple or list')\n super(SimpleListAdapter, self).__init__(**kwargs)\n", "issue": "simplelistadapter should accept objects inheriting from list or tuple\nI'll found it usefull if it was possible to extend the list object that I pass to the simplelistadapter, but an exception is raised.\n\nReproduce :\n\n``` python\nfrom kivy.adapters.simplelistadapter import SimpleListAdapter\nclass ExtendedList(list):\n pass\n\nlist_adapter = SimpleListAdapter(data=ExtendedList())\n```\n\nA solution :\nIn kivy/adapters/simplelistadapter.py\n\n``` python\n 47 if type(kwargs['data']) not in (tuple, list): \n 48 raise Exception('list adapter: data must be a tuple or list') \n```\n\nMay be replaced by:\n\n``` python\nif not isinstance(kwargs['data'], list) and not isinstance(kwargs['data'], tuple)\n```\n\n", "before_files": [{"content": "'''\nSimpleListAdapter\n=================\n\n.. versionadded:: 1.5\n\n.. warning::\n\n This code is still experimental, and its API is subject to change in a\n future version.\n\nThe :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for\nbasic lists. For example, it can be used for displaying a list of read-only\nstrings that do not require user interaction.\n\n'''\n\n__all__ = ('SimpleListAdapter', )\n\nfrom kivy.adapters.adapter import Adapter\nfrom kivy.properties import ListProperty\nfrom kivy.lang import Builder\n\n\nclass SimpleListAdapter(Adapter):\n '''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an\n adapter around a Python list.\n\n From :class:`~kivy.adapters.adapter.Adapter`, the\n :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,\n and args_converter properties.\n '''\n\n data = ListProperty([])\n '''The data list property contains a list of objects (which can be strings)\n that will be used directly if no args_converter function is provided. If\n there is an args_converter, the data objects will be passed to it for\n instantiating the item view class instances.\n\n :data:`data` is a :class:`~kivy.properties.ListProperty` and\n defaults to [].\n '''\n\n def __init__(self, **kwargs):\n if 'data' not in kwargs:\n raise Exception('list adapter: input must include data argument')\n if type(kwargs['data']) not in (tuple, list):\n raise Exception('list adapter: data must be a tuple or list')\n super(SimpleListAdapter, self).__init__(**kwargs)\n\n def get_count(self):\n return len(self.data)\n\n def get_data_item(self, index):\n if index < 0 or index >= len(self.data):\n return None\n return self.data[index]\n\n # Returns a view instance for an item.\n def get_view(self, index):\n item = self.get_data_item(index)\n\n if item is None:\n return None\n\n item_args = self.args_converter(index, item)\n\n if self.cls:\n instance = self.cls(**item_args)\n return instance\n else:\n return Builder.template(self.template, **item_args)\n", "path": "kivy/adapters/simplelistadapter.py"}], "after_files": [{"content": "'''\nSimpleListAdapter\n=================\n\n.. versionadded:: 1.5\n\n.. 
warning::\n\n This code is still experimental, and its API is subject to change in a\n future version.\n\nThe :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for\nbasic lists. For example, it can be used for displaying a list of read-only\nstrings that do not require user interaction.\n\n'''\n\n__all__ = ('SimpleListAdapter', )\n\nfrom kivy.adapters.adapter import Adapter\nfrom kivy.properties import ListProperty\nfrom kivy.lang import Builder\n\n\nclass SimpleListAdapter(Adapter):\n '''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an\n adapter around a Python list.\n\n From :class:`~kivy.adapters.adapter.Adapter`, the\n :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,\n and args_converter properties.\n '''\n\n data = ListProperty([])\n '''The data list property contains a list of objects (which can be strings)\n that will be used directly if no args_converter function is provided. If\n there is an args_converter, the data objects will be passed to it for\n instantiating the item view class instances.\n\n :data:`data` is a :class:`~kivy.properties.ListProperty` and\n defaults to [].\n '''\n\n def __init__(self, **kwargs):\n if 'data' not in kwargs:\n raise Exception('list adapter: input must include data argument')\n if not isinstance(kwargs['data'], list) and \\\n not isinstance(kwargs['data'], tuple):\n raise Exception('list adapter: data must be a tuple or list')\n super(SimpleListAdapter, self).__init__(**kwargs)\n\n def get_count(self):\n return len(self.data)\n\n def get_data_item(self, index):\n if index < 0 or index >= len(self.data):\n return None\n return self.data[index]\n\n # Returns a view instance for an item.\n def get_view(self, index):\n item = self.get_data_item(index)\n\n if item is None:\n return None\n\n item_args = self.args_converter(index, item)\n\n if self.cls:\n instance = self.cls(**item_args)\n return instance\n else:\n return Builder.template(self.template, **item_args)\n", "path": "kivy/adapters/simplelistadapter.py"}]}
| 1,054 | 154 |
gh_patches_debug_24859
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-16242
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable translations for hotspots subsystem
There are unused translations in the hotspots subsystem that could be enabled, since finished translations are already available. At the moment the UI shows a mix of English and the configured user language.
Affected file: zerver/lib/hotspots.py
Example (mixed English/German):

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/hotspots.py`
Content:
```
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from typing import Dict, List
4
5 from django.conf import settings
6 from django.utils.translation import ugettext as _
7
8 from zerver.models import UserHotspot, UserProfile
9
10 ALL_HOTSPOTS: Dict[str, Dict[str, str]] = {
11 'intro_reply': {
12 'title': _('Reply to a message'),
13 'description': _('Click anywhere on a message to reply.'),
14 },
15 'intro_streams': {
16 'title': _('Catch up on a stream'),
17 'description': _('Messages sent to a stream are seen by everyone subscribed '
18 'to that stream. Try clicking on one of the stream links below.'),
19 },
20 'intro_topics': {
21 'title': _('Topics'),
22 'description': _('Every message has a topic. Topics keep conversations '
23 'easy to follow, and make it easy to reply to conversations that start '
24 'while you are offline.'),
25 },
26 'intro_gear': {
27 'title': _('Settings'),
28 'description': _('Go to Settings to configure your '
29 'notifications and display settings.'),
30 },
31 'intro_compose': {
32 'title': _('Compose'),
33 'description': _('Click here to start a new conversation. Pick a topic '
34 '(2-3 words is best), and give it a go!'),
35 },
36 }
37
38 def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
39 # For manual testing, it can be convenient to set
40 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
41 # make it easy to click on all of the hotspots. Note that
42 # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link
43 # above) for details.
44 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
45 return [{
46 'name': hotspot,
47 'title': ALL_HOTSPOTS[hotspot]['title'],
48 'description': ALL_HOTSPOTS[hotspot]['description'],
49 'delay': 0,
50 } for hotspot in ALL_HOTSPOTS]
51
52 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
53 return []
54
55 seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))
56 for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_gear', 'intro_compose']:
57 if hotspot not in seen_hotspots:
58 return [{
59 'name': hotspot,
60 'title': ALL_HOTSPOTS[hotspot]['title'],
61 'description': ALL_HOTSPOTS[hotspot]['description'],
62 'delay': 0.5,
63 }]
64
65 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
66 user.save(update_fields=['tutorial_status'])
67 return []
68
69 def copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:
70 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
71 UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,
72 timestamp=userhotspot.timestamp)
73
74 target_profile.tutorial_status = source_profile.tutorial_status
75 target_profile.onboarding_steps = source_profile.onboarding_steps
76 target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -3,7 +3,7 @@
from typing import Dict, List
from django.conf import settings
-from django.utils.translation import ugettext as _
+from django.utils.translation import ugettext_lazy as _
from zerver.models import UserHotspot, UserProfile
@@ -44,8 +44,8 @@
if settings.ALWAYS_SEND_ALL_HOTSPOTS:
return [{
'name': hotspot,
- 'title': ALL_HOTSPOTS[hotspot]['title'],
- 'description': ALL_HOTSPOTS[hotspot]['description'],
+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),
+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),
'delay': 0,
} for hotspot in ALL_HOTSPOTS]
@@ -57,8 +57,8 @@
if hotspot not in seen_hotspots:
return [{
'name': hotspot,
- 'title': ALL_HOTSPOTS[hotspot]['title'],
- 'description': ALL_HOTSPOTS[hotspot]['description'],
+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),
+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),
'delay': 0.5,
}]
|
{"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -3,7 +3,7 @@\n from typing import Dict, List\n \n from django.conf import settings\n-from django.utils.translation import ugettext as _\n+from django.utils.translation import ugettext_lazy as _\n \n from zerver.models import UserHotspot, UserProfile\n \n@@ -44,8 +44,8 @@\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n- 'title': ALL_HOTSPOTS[hotspot]['title'],\n- 'description': ALL_HOTSPOTS[hotspot]['description'],\n+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n \n@@ -57,8 +57,8 @@\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n- 'title': ALL_HOTSPOTS[hotspot]['title'],\n- 'description': ALL_HOTSPOTS[hotspot]['description'],\n+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0.5,\n }]\n", "issue": "Enable translations for hotspots subsystem\nThere are unused translations at the hotspots subsystem, which could be enabled due to finished and available translations. At the moment there is a mix of English and the configured user language.\r\n\r\nAffected file: zerver/lib/hotspots.py\r\n\r\nExample (mixed English/German):\r\n\r\n\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserHotspot, UserProfile\n\nALL_HOTSPOTS: Dict[str, Dict[str, str]] = {\n 'intro_reply': {\n 'title': _('Reply to a message'),\n 'description': _('Click anywhere on a message to reply.'),\n },\n 'intro_streams': {\n 'title': _('Catch up on a stream'),\n 'description': _('Messages sent to a stream are seen by everyone subscribed '\n 'to that stream. Try clicking on one of the stream links below.'),\n },\n 'intro_topics': {\n 'title': _('Topics'),\n 'description': _('Every message has a topic. Topics keep conversations '\n 'easy to follow, and make it easy to reply to conversations that start '\n 'while you are offline.'),\n },\n 'intro_gear': {\n 'title': _('Settings'),\n 'description': _('Go to Settings to configure your '\n 'notifications and display settings.'),\n },\n 'intro_compose': {\n 'title': _('Compose'),\n 'description': _('Click here to start a new conversation. Pick a topic '\n '(2-3 words is best), and give it a go!'),\n },\n}\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))\n for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_gear', 'intro_compose']:\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0.5,\n }]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=['tutorial_status'])\n return []\n\ndef copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,\n timestamp=userhotspot.timestamp)\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])\n", "path": "zerver/lib/hotspots.py"}], "after_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom zerver.models import UserHotspot, UserProfile\n\nALL_HOTSPOTS: Dict[str, Dict[str, str]] = {\n 'intro_reply': {\n 'title': _('Reply to a message'),\n 'description': _('Click anywhere on a message to reply.'),\n },\n 'intro_streams': {\n 'title': _('Catch up on a stream'),\n 'description': _('Messages sent to a stream are seen by everyone subscribed '\n 'to that stream. Try clicking on one of the stream links below.'),\n },\n 'intro_topics': {\n 'title': _('Topics'),\n 'description': _('Every message has a topic. Topics keep conversations '\n 'easy to follow, and make it easy to reply to conversations that start '\n 'while you are offline.'),\n },\n 'intro_gear': {\n 'title': _('Settings'),\n 'description': _('Go to Settings to configure your '\n 'notifications and display settings.'),\n },\n 'intro_compose': {\n 'title': _('Compose'),\n 'description': _('Click here to start a new conversation. Pick a topic '\n '(2-3 words is best), and give it a go!'),\n },\n}\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))\n for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_gear', 'intro_compose']:\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0.5,\n }]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=['tutorial_status'])\n return []\n\ndef copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,\n timestamp=userhotspot.timestamp)\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])\n", "path": "zerver/lib/hotspots.py"}]}
| 1,247 | 314 |
gh_patches_debug_13021
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-173
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update PyPI description
At the moment I wouldn't be tempted if I first saw this page.
https://pypi.python.org/pypi/mkdocs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10
11 name = 'mkdocs'
12 package = 'mkdocs'
13 description = 'In progress.'
14 url = 'http://www.mkdocs.org'
15 author = 'Tom Christie'
16 author_email = '[email protected]'
17 license = 'BSD'
18 install_requires = [
19 'Jinja2>=2.7.1',
20 'Markdown>=2.3.1,<2.5',
21 'PyYAML>=3.10',
22 'watchdog>=0.7.0',
23 'ghp-import>=0.4.1'
24 ]
25
26 long_description = """Work in progress."""
27
28
29 def get_version(package):
30 """
31 Return package version as listed in `__version__` in `init.py`.
32 """
33 init_py = open(os.path.join(package, '__init__.py')).read()
34 return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
35
36
37 def get_packages(package):
38 """
39 Return root package and all sub-packages.
40 """
41 return [dirpath
42 for dirpath, dirnames, filenames in os.walk(package)
43 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
44
45
46 def get_package_data(package):
47 """
48 Return all files under the root package, that are not in a
49 package themselves.
50 """
51 walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
52 for dirpath, dirnames, filenames in os.walk(package)
53 if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
54
55 filepaths = []
56 for base, filenames in walk:
57 filepaths.extend([os.path.join(base, filename)
58 for filename in filenames])
59 return {package: filepaths}
60
61
62 if sys.argv[-1] == 'publish':
63 os.system("python setup.py sdist upload")
64 args = {'version': get_version(package)}
65 print("You probably want to also tag the version now:")
66 print(" git tag -a %(version)s -m 'version %(version)s'" % args)
67 print(" git push --tags")
68 sys.exit()
69
70
71 setup(
72 name=name,
73 version=get_version(package),
74 url=url,
75 license=license,
76 description=description,
77 long_description=long_description,
78 author=author,
79 author_email=author_email,
80 packages=get_packages(package),
81 package_data=get_package_data(package),
82 install_requires=install_requires,
83 entry_points={
84 'console_scripts': [
85 'mkdocs = mkdocs.main:run_main',
86 ],
87 },
88 classifiers=[
89 'Development Status :: 5 - Production/Stable',
90 'Environment :: Console',
91 'Environment :: Web Environment',
92 'Intended Audience :: Developers',
93 'License :: OSI Approved :: BSD License',
94 'Operating System :: OS Independent',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 2',
97 'Programming Language :: Python :: 2.6',
98 'Programming Language :: Python :: 2.7',
99 'Programming Language :: Python :: 3',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Topic :: Documentation',
103 'Topic :: Text Processing',
104 ]
105 )
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
name = 'mkdocs'
package = 'mkdocs'
-description = 'In progress.'
+description = 'Project documentation with Markdown.'
url = 'http://www.mkdocs.org'
author = 'Tom Christie'
author_email = '[email protected]'
@@ -23,7 +23,12 @@
'ghp-import>=0.4.1'
]
-long_description = """Work in progress."""
+long_description = (
+ "MkDocs is a fast, simple and downright gorgeous static site generator "
+ "that's geared towards building project documentation. Documentation "
+ "source files are written in Markdown, and configured with a single YAML "
+ "configuration file."
+)
def get_version(package):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n \n name = 'mkdocs'\n package = 'mkdocs'\n-description = 'In progress.'\n+description = 'Project documentation with Markdown.'\n url = 'http://www.mkdocs.org'\n author = 'Tom Christie'\n author_email = '[email protected]'\n@@ -23,7 +23,12 @@\n 'ghp-import>=0.4.1'\n ]\n \n-long_description = \"\"\"Work in progress.\"\"\"\n+long_description = (\n+ \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n+ \"that's geared towards building project documentation. Documentation \"\n+ \"source files are written in Markdown, and configured with a single YAML \"\n+ \"configuration file.\"\n+)\n \n \n def get_version(package):\n", "issue": "Update PyPI description\nAt the moment I wouldn't be tempted if I first seen this page.\n\nhttps://pypi.python.org/pypi/mkdocs\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'In progress.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2>=2.7.1',\n 'Markdown>=2.3.1,<2.5',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n 'ghp-import>=0.4.1'\n]\n\nlong_description = \"\"\"Work in progress.\"\"\"\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python 
:: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'Project documentation with Markdown.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2>=2.7.1',\n 'Markdown>=2.3.1,<2.5',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n 'ghp-import>=0.4.1'\n]\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}]}
| 1,234 | 189 |
gh_patches_debug_3389
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-5011
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Please make the rqrequeue service quieter
## Description
The rqrequeue service feels compelled to report that it has nothing to do, resulting in an endless stream of "No interrupted jobs found in started job registry." messages. This is not helpful during normal operations, and annoying during development.
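Demoting the message from `info` to `debug` is enough to silence it under a typical configuration, since the root logger's level filters it out. A minimal standalone sketch, with an assumed INFO-level setup:
```python
import logging

logging.basicConfig(level=logging.INFO)  # assumed production-style default

logging.info("No interrupted jobs found in started job registry.")   # printed every run
logging.debug("No interrupted jobs found in started job registry.")  # dropped at INFO level
```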
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/worker.py`
Content:
```
1 import logging
2 import os
3 from typing import Optional, List
4
5 from redis import Redis
6 from rq.queue import Queue
7 from rq.worker import Worker, WorkerStatus
8 from rq.exceptions import InvalidJobOperation, NoSuchJobError
9 from rq.registry import StartedJobRegistry
10
11 from sdconfig import config
12
13
14 def create_queue(name=None, timeout=3600):
15 # type: (str, int) -> Queue
16 """
17 Create an rq ``Queue`` named ``name`` with default timeout ``timeout``.
18
19 If ``name`` is omitted, ``config.RQ_WORKER_NAME`` is used.
20 """
21 if name is None:
22 name = config.RQ_WORKER_NAME
23 q = Queue(name=name, connection=Redis(), default_timeout=timeout)
24 return q
25
26
27 def rq_workers(queue=None):
28 # type: (Queue) -> List[Worker]
29 """
30 Returns the list of current rq ``Worker``s.
31 """
32
33 return Worker.all(connection=Redis(), queue=queue)
34
35
36 def worker_for_job(job_id):
37 # type: (str) -> Optional[Worker]
38 """
39 If the job is being run, return its ``Worker``.
40 """
41 for worker in rq_workers():
42 # If the worker process no longer exists, skip it. From "man 2
43 # kill": "If sig is 0, then no signal is sent, but existence
44 # and permission checks are still performed; this can be used
45 # to check for the existence of a process ID or process group
46 # ID that the caller is permitted to signal."
47 try:
48 os.kill(worker.pid, 0)
49 except OSError:
50 continue
51
52 # If it's running and working on the given job, return it.
53 if worker.state == WorkerStatus.BUSY and job_id == worker.get_current_job_id():
54 return worker
55 return None
56
57
58 def requeue_interrupted_jobs(queue_name=None):
59 # type: (str) -> None
60 """
61 Requeues jobs found in the given queue's started job registry.
62
63 Only restarts those that aren't already queued or being run.
64
65 When rq starts a job, it records it in the queue's started job
66 registry. If the server is rebooted before the job completes, the
67 job is not automatically restarted from the information in the
68 registry. For tasks like secure deletion of files, this means that
69 information thought to be deleted is still present in the case of
70 seizure or compromise. We have manage.py tasks to clean such files
71 up, but this utility attempts to reduce the need for manual
72 intervention by automatically resuming interrupted jobs.
73
74 This function is predicated on a risky assumption: that all jobs
75 are idempotent. At time of writing, we use rq for securely
76 deleting submission files and hashing submissions for the ETag
77 header. Both of these can be safely repeated. If we add rq tasks
78 that cannot, this function should be improved to omit those.
79 """
80 queue = create_queue(queue_name)
81 started_job_registry = StartedJobRegistry(queue=queue)
82
83 queued_job_ids = queue.get_job_ids()
84 logging.debug("queued jobs: {}".format(queued_job_ids))
85 started_job_ids = started_job_registry.get_job_ids()
86 logging.debug("started jobs: {}".format(started_job_ids))
87 job_ids = [j for j in started_job_ids if j not in queued_job_ids]
88 logging.debug("candidate job ids: {}".format(job_ids))
89
90 if not job_ids:
91 logging.info("No interrupted jobs found in started job registry.")
92
93 for job_id in job_ids:
94 logging.debug("Considering job %s", job_id)
95 try:
96 job = started_job_registry.job_class.fetch(job_id, started_job_registry.connection)
97 except NoSuchJobError as e:
98 logging.error(
99 "Could not find details for job %s: %s", job_id, e
100 )
101 continue
102
103 logging.debug(
104 "Job %s enqueued at %s, started at %s", job_id, job.enqueued_at, job.started_at
105 )
106
107 worker = worker_for_job(job_id)
108 if worker:
109 logging.info(
110 "Skipping job %s, which is already being run by worker %s", job_id, worker.key
111 )
112 continue
113
114 logging.info("Requeuing job %s", job)
115
116 try:
117 started_job_registry.remove(job)
118 except InvalidJobOperation as e:
119 logging.error("Could not remove job %s from started job registry: %s", job, e)
120 continue
121
122 try:
123 queue.enqueue_job(job)
124 logging.debug("Job now enqueued at %s, started at %s", job.enqueued_at, job.started_at)
125 except Exception as e:
126 logging.error("Could not requeue job %s: %s", job, e)
127 continue
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/worker.py b/securedrop/worker.py
--- a/securedrop/worker.py
+++ b/securedrop/worker.py
@@ -88,7 +88,7 @@
logging.debug("candidate job ids: {}".format(job_ids))
if not job_ids:
- logging.info("No interrupted jobs found in started job registry.")
+ logging.debug("No interrupted jobs found in started job registry.")
for job_id in job_ids:
logging.debug("Considering job %s", job_id)
|
{"golden_diff": "diff --git a/securedrop/worker.py b/securedrop/worker.py\n--- a/securedrop/worker.py\n+++ b/securedrop/worker.py\n@@ -88,7 +88,7 @@\n logging.debug(\"candidate job ids: {}\".format(job_ids))\n \n if not job_ids:\n- logging.info(\"No interrupted jobs found in started job registry.\")\n+ logging.debug(\"No interrupted jobs found in started job registry.\")\n \n for job_id in job_ids:\n logging.debug(\"Considering job %s\", job_id)\n", "issue": "Please make the rqrequeue service quieter\n## Description\r\n\r\nThe rqrequeue service feels compelled to report that it has nothing to do, resulting in an endless stream of \"No interrupted jobs found in started job registry.\" messages. This is not helpful during normal operations, and annoying during development.\n", "before_files": [{"content": "import logging\nimport os\nfrom typing import Optional, List\n\nfrom redis import Redis\nfrom rq.queue import Queue\nfrom rq.worker import Worker, WorkerStatus\nfrom rq.exceptions import InvalidJobOperation, NoSuchJobError\nfrom rq.registry import StartedJobRegistry\n\nfrom sdconfig import config\n\n\ndef create_queue(name=None, timeout=3600):\n # type: (str, int) -> Queue\n \"\"\"\n Create an rq ``Queue`` named ``name`` with default timeout ``timeout``.\n\n If ``name`` is omitted, ``config.RQ_WORKER_NAME`` is used.\n \"\"\"\n if name is None:\n name = config.RQ_WORKER_NAME\n q = Queue(name=name, connection=Redis(), default_timeout=timeout)\n return q\n\n\ndef rq_workers(queue=None):\n # type: (Queue) -> List[Worker]\n \"\"\"\n Returns the list of current rq ``Worker``s.\n \"\"\"\n\n return Worker.all(connection=Redis(), queue=queue)\n\n\ndef worker_for_job(job_id):\n # type: (str) -> Optional[Worker]\n \"\"\"\n If the job is being run, return its ``Worker``.\n \"\"\"\n for worker in rq_workers():\n # If the worker process no longer exists, skip it. From \"man 2\n # kill\": \"If sig is 0, then no signal is sent, but existence\n # and permission checks are still performed; this can be used\n # to check for the existence of a process ID or process group\n # ID that the caller is permitted to signal.\"\n try:\n os.kill(worker.pid, 0)\n except OSError:\n continue\n\n # If it's running and working on the given job, return it.\n if worker.state == WorkerStatus.BUSY and job_id == worker.get_current_job_id():\n return worker\n return None\n\n\ndef requeue_interrupted_jobs(queue_name=None):\n # type: (str) -> None\n \"\"\"\n Requeues jobs found in the given queue's started job registry.\n\n Only restarts those that aren't already queued or being run.\n\n When rq starts a job, it records it in the queue's started job\n registry. If the server is rebooted before the job completes, the\n job is not automatically restarted from the information in the\n registry. For tasks like secure deletion of files, this means that\n information thought to be deleted is still present in the case of\n seizure or compromise. We have manage.py tasks to clean such files\n up, but this utility attempts to reduce the need for manual\n intervention by automatically resuming interrupted jobs.\n\n This function is predicated on a risky assumption: that all jobs\n are idempotent. At time of writing, we use rq for securely\n deleting submission files and hashing submissions for the ETag\n header. Both of these can be safely repeated. 
If we add rq tasks\n that cannot, this function should be improved to omit those.\n \"\"\"\n queue = create_queue(queue_name)\n started_job_registry = StartedJobRegistry(queue=queue)\n\n queued_job_ids = queue.get_job_ids()\n logging.debug(\"queued jobs: {}\".format(queued_job_ids))\n started_job_ids = started_job_registry.get_job_ids()\n logging.debug(\"started jobs: {}\".format(started_job_ids))\n job_ids = [j for j in started_job_ids if j not in queued_job_ids]\n logging.debug(\"candidate job ids: {}\".format(job_ids))\n\n if not job_ids:\n logging.info(\"No interrupted jobs found in started job registry.\")\n\n for job_id in job_ids:\n logging.debug(\"Considering job %s\", job_id)\n try:\n job = started_job_registry.job_class.fetch(job_id, started_job_registry.connection)\n except NoSuchJobError as e:\n logging.error(\n \"Could not find details for job %s: %s\", job_id, e\n )\n continue\n\n logging.debug(\n \"Job %s enqueued at %s, started at %s\", job_id, job.enqueued_at, job.started_at\n )\n\n worker = worker_for_job(job_id)\n if worker:\n logging.info(\n \"Skipping job %s, which is already being run by worker %s\", job_id, worker.key\n )\n continue\n\n logging.info(\"Requeuing job %s\", job)\n\n try:\n started_job_registry.remove(job)\n except InvalidJobOperation as e:\n logging.error(\"Could not remove job %s from started job registry: %s\", job, e)\n continue\n\n try:\n queue.enqueue_job(job)\n logging.debug(\"Job now enqueued at %s, started at %s\", job.enqueued_at, job.started_at)\n except Exception as e:\n logging.error(\"Could not requeue job %s: %s\", job, e)\n continue\n", "path": "securedrop/worker.py"}], "after_files": [{"content": "import logging\nimport os\nfrom typing import Optional, List\n\nfrom redis import Redis\nfrom rq.queue import Queue\nfrom rq.worker import Worker, WorkerStatus\nfrom rq.exceptions import InvalidJobOperation, NoSuchJobError\nfrom rq.registry import StartedJobRegistry\n\nfrom sdconfig import config\n\n\ndef create_queue(name=None, timeout=3600):\n # type: (str, int) -> Queue\n \"\"\"\n Create an rq ``Queue`` named ``name`` with default timeout ``timeout``.\n\n If ``name`` is omitted, ``config.RQ_WORKER_NAME`` is used.\n \"\"\"\n if name is None:\n name = config.RQ_WORKER_NAME\n q = Queue(name=name, connection=Redis(), default_timeout=timeout)\n return q\n\n\ndef rq_workers(queue=None):\n # type: (Queue) -> List[Worker]\n \"\"\"\n Returns the list of current rq ``Worker``s.\n \"\"\"\n\n return Worker.all(connection=Redis(), queue=queue)\n\n\ndef worker_for_job(job_id):\n # type: (str) -> Optional[Worker]\n \"\"\"\n If the job is being run, return its ``Worker``.\n \"\"\"\n for worker in rq_workers():\n # If the worker process no longer exists, skip it. From \"man 2\n # kill\": \"If sig is 0, then no signal is sent, but existence\n # and permission checks are still performed; this can be used\n # to check for the existence of a process ID or process group\n # ID that the caller is permitted to signal.\"\n try:\n os.kill(worker.pid, 0)\n except OSError:\n continue\n\n # If it's running and working on the given job, return it.\n if worker.state == WorkerStatus.BUSY and job_id == worker.get_current_job_id():\n return worker\n return None\n\n\ndef requeue_interrupted_jobs(queue_name=None):\n # type: (str) -> None\n \"\"\"\n Requeues jobs found in the given queue's started job registry.\n\n Only restarts those that aren't already queued or being run.\n\n When rq starts a job, it records it in the queue's started job\n registry. 
If the server is rebooted before the job completes, the\n job is not automatically restarted from the information in the\n registry. For tasks like secure deletion of files, this means that\n information thought to be deleted is still present in the case of\n seizure or compromise. We have manage.py tasks to clean such files\n up, but this utility attempts to reduce the need for manual\n intervention by automatically resuming interrupted jobs.\n\n This function is predicated on a risky assumption: that all jobs\n are idempotent. At time of writing, we use rq for securely\n deleting submission files and hashing submissions for the ETag\n header. Both of these can be safely repeated. If we add rq tasks\n that cannot, this function should be improved to omit those.\n \"\"\"\n queue = create_queue(queue_name)\n started_job_registry = StartedJobRegistry(queue=queue)\n\n queued_job_ids = queue.get_job_ids()\n logging.debug(\"queued jobs: {}\".format(queued_job_ids))\n started_job_ids = started_job_registry.get_job_ids()\n logging.debug(\"started jobs: {}\".format(started_job_ids))\n job_ids = [j for j in started_job_ids if j not in queued_job_ids]\n logging.debug(\"candidate job ids: {}\".format(job_ids))\n\n if not job_ids:\n logging.debug(\"No interrupted jobs found in started job registry.\")\n\n for job_id in job_ids:\n logging.debug(\"Considering job %s\", job_id)\n try:\n job = started_job_registry.job_class.fetch(job_id, started_job_registry.connection)\n except NoSuchJobError as e:\n logging.error(\n \"Could not find details for job %s: %s\", job_id, e\n )\n continue\n\n logging.debug(\n \"Job %s enqueued at %s, started at %s\", job_id, job.enqueued_at, job.started_at\n )\n\n worker = worker_for_job(job_id)\n if worker:\n logging.info(\n \"Skipping job %s, which is already being run by worker %s\", job_id, worker.key\n )\n continue\n\n logging.info(\"Requeuing job %s\", job)\n\n try:\n started_job_registry.remove(job)\n except InvalidJobOperation as e:\n logging.error(\"Could not remove job %s from started job registry: %s\", job, e)\n continue\n\n try:\n queue.enqueue_job(job)\n logging.debug(\"Job now enqueued at %s, started at %s\", job.enqueued_at, job.started_at)\n except Exception as e:\n logging.error(\"Could not requeue job %s: %s\", job, e)\n continue\n", "path": "securedrop/worker.py"}]}
| 1,645 | 117 |
gh_patches_debug_25831
|
rasdani/github-patches
|
git_diff
|
larq__larq-93
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: Add links to source code
This is really handy if people want to understand what's going on behind the scenes or want to implement more advanced stuff
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `generate_api_docs.py`
Content:
```
1 """https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py"""
2
3 import os
4 import sys
5 import yaml
6
7 from pydocmd.document import Index
8 from pydocmd.imp import dir_object
9 from pydocmd.loader import PythonLoader
10 from pydocmd.preprocessor import Preprocessor
11
12
13 with open("apidocs.yml", "r") as stream:
14 api_structure = yaml.safe_load(stream)
15
16 # Build the index and document structure first, we load the actual
17 # docstrings at a later point.
18 print("Building index...")
19 index = Index()
20
21
22 def add_sections(doc, object_names, depth=1):
23 if isinstance(object_names, list):
24 [add_sections(doc, x, depth) for x in object_names]
25 elif isinstance(object_names, dict):
26 for key, subsections in object_names.items():
27 add_sections(doc, key, depth)
28 add_sections(doc, subsections, depth + 1)
29 elif isinstance(object_names, str):
30 # Check how many levels of recursion we should be going.
31 expand_depth = len(object_names)
32 object_names = object_names.rstrip("+")
33 expand_depth -= len(object_names)
34
35 def create_sections(name, level):
36 if level > expand_depth:
37 return
38 index.new_section(doc, name, depth=depth + level, header_type="markdown")
39 for sub in dir_object(name, "line", False):
40 sub = name + "." + sub
41 create_sections(sub, level + 1)
42
43 create_sections(object_names, 0)
44 else:
45 raise RuntimeError(object_names)
46
47
48 # Make sure that we can find modules from the current working directory,
49 # and have them take precedence over installed modules.
50 sys.path.insert(0, ".")
51
52 for pages in api_structure:
53 for fname, object_names in pages.items():
54 doc = index.new_document(fname)
55 add_sections(doc, object_names)
56
57 loader = PythonLoader({})
58 preproc = Preprocessor({})
59
60 preproc.link_lookup = {}
61 for file, doc in index.documents.items():
62 for section in doc.sections:
63 preproc.link_lookup[section.identifier] = file
64 # Load the docstrings and fill the sections.
65 print("Started generating documentation...")
66 for doc in index.documents.values():
67 for section in filter(lambda s: s.identifier, doc.sections):
68 loader.load_section(section)
69 preproc.preprocess_section(section)
70
71 # Write out all the generated documents.
72 os.makedirs(os.path.join("docs", "api"), exist_ok=True)
73 for fname, doc in index.documents.items():
74 with open(os.path.join("docs", "api", fname), "w") as fp:
75 for section in doc.sections:
76 section.render(fp)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/generate_api_docs.py b/generate_api_docs.py
--- a/generate_api_docs.py
+++ b/generate_api_docs.py
@@ -1,5 +1,6 @@
"""https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py"""
+import inspect
import os
import sys
import yaml
@@ -10,6 +11,23 @@
from pydocmd.preprocessor import Preprocessor
+def callable_to_source_link(obj, scope):
+ path = scope.__file__.lstrip(".")
+ source = inspect.getsourcelines(obj)
+ line = source[-1] + 1 if source[0][0].startswith("@") else source[-1]
+ link = f"https://github.com/plumerai/larq/blob/master{path}#L{line}"
+ return f'<a class="headerlink code-link" style="float:right;" href="{link}" title="Source Code"></a>'
+
+
+class PythonLoaderWithSource(PythonLoader):
+ def load_section(self, section):
+ super().load_section(section)
+ obj = section.loader_context["obj"]
+ if callable(obj):
+ scope = section.loader_context["scope"]
+ section.title += callable_to_source_link(obj, scope)
+
+
with open("apidocs.yml", "r") as stream:
api_structure = yaml.safe_load(stream)
@@ -54,7 +72,7 @@
doc = index.new_document(fname)
add_sections(doc, object_names)
-loader = PythonLoader({})
+loader = PythonLoaderWithSource({})
preproc = Preprocessor({})
preproc.link_lookup = {}
|
{"golden_diff": "diff --git a/generate_api_docs.py b/generate_api_docs.py\n--- a/generate_api_docs.py\n+++ b/generate_api_docs.py\n@@ -1,5 +1,6 @@\n \"\"\"https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py\"\"\"\n \n+import inspect\n import os\n import sys\n import yaml\n@@ -10,6 +11,23 @@\n from pydocmd.preprocessor import Preprocessor\n \n \n+def callable_to_source_link(obj, scope):\n+ path = scope.__file__.lstrip(\".\")\n+ source = inspect.getsourcelines(obj)\n+ line = source[-1] + 1 if source[0][0].startswith(\"@\") else source[-1]\n+ link = f\"https://github.com/plumerai/larq/blob/master{path}#L{line}\"\n+ return f'<a class=\"headerlink code-link\" style=\"float:right;\" href=\"{link}\" title=\"Source Code\"></a>'\n+\n+\n+class PythonLoaderWithSource(PythonLoader):\n+ def load_section(self, section):\n+ super().load_section(section)\n+ obj = section.loader_context[\"obj\"]\n+ if callable(obj):\n+ scope = section.loader_context[\"scope\"]\n+ section.title += callable_to_source_link(obj, scope)\n+\n+\n with open(\"apidocs.yml\", \"r\") as stream:\n api_structure = yaml.safe_load(stream)\n \n@@ -54,7 +72,7 @@\n doc = index.new_document(fname)\n add_sections(doc, object_names)\n \n-loader = PythonLoader({})\n+loader = PythonLoaderWithSource({})\n preproc = Preprocessor({})\n \n preproc.link_lookup = {}\n", "issue": "Docs: Add links to source code\nThis is really handy if people want to understand what's going on behind the scenes or want to implement more advanced stuff\n", "before_files": [{"content": "\"\"\"https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py\"\"\"\n\nimport os\nimport sys\nimport yaml\n\nfrom pydocmd.document import Index\nfrom pydocmd.imp import dir_object\nfrom pydocmd.loader import PythonLoader\nfrom pydocmd.preprocessor import Preprocessor\n\n\nwith open(\"apidocs.yml\", \"r\") as stream:\n api_structure = yaml.safe_load(stream)\n\n# Build the index and document structure first, we load the actual\n# docstrings at a later point.\nprint(\"Building index...\")\nindex = Index()\n\n\ndef add_sections(doc, object_names, depth=1):\n if isinstance(object_names, list):\n [add_sections(doc, x, depth) for x in object_names]\n elif isinstance(object_names, dict):\n for key, subsections in object_names.items():\n add_sections(doc, key, depth)\n add_sections(doc, subsections, depth + 1)\n elif isinstance(object_names, str):\n # Check how many levels of recursion we should be going.\n expand_depth = len(object_names)\n object_names = object_names.rstrip(\"+\")\n expand_depth -= len(object_names)\n\n def create_sections(name, level):\n if level > expand_depth:\n return\n index.new_section(doc, name, depth=depth + level, header_type=\"markdown\")\n for sub in dir_object(name, \"line\", False):\n sub = name + \".\" + sub\n create_sections(sub, level + 1)\n\n create_sections(object_names, 0)\n else:\n raise RuntimeError(object_names)\n\n\n# Make sure that we can find modules from the current working directory,\n# and have them take precedence over installed modules.\nsys.path.insert(0, \".\")\n\nfor pages in api_structure:\n for fname, object_names in pages.items():\n doc = index.new_document(fname)\n add_sections(doc, object_names)\n\nloader = PythonLoader({})\npreproc = Preprocessor({})\n\npreproc.link_lookup = {}\nfor file, doc in index.documents.items():\n for section in doc.sections:\n preproc.link_lookup[section.identifier] = file\n# Load the docstrings and fill the sections.\nprint(\"Started generating 
documentation...\")\nfor doc in index.documents.values():\n for section in filter(lambda s: s.identifier, doc.sections):\n loader.load_section(section)\n preproc.preprocess_section(section)\n\n# Write out all the generated documents.\nos.makedirs(os.path.join(\"docs\", \"api\"), exist_ok=True)\nfor fname, doc in index.documents.items():\n with open(os.path.join(\"docs\", \"api\", fname), \"w\") as fp:\n for section in doc.sections:\n section.render(fp)\n", "path": "generate_api_docs.py"}], "after_files": [{"content": "\"\"\"https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py\"\"\"\n\nimport inspect\nimport os\nimport sys\nimport yaml\n\nfrom pydocmd.document import Index\nfrom pydocmd.imp import dir_object\nfrom pydocmd.loader import PythonLoader\nfrom pydocmd.preprocessor import Preprocessor\n\n\ndef callable_to_source_link(obj, scope):\n path = scope.__file__.lstrip(\".\")\n source = inspect.getsourcelines(obj)\n line = source[-1] + 1 if source[0][0].startswith(\"@\") else source[-1]\n link = f\"https://github.com/plumerai/larq/blob/master{path}#L{line}\"\n return f'<a class=\"headerlink code-link\" style=\"float:right;\" href=\"{link}\" title=\"Source Code\"></a>'\n\n\nclass PythonLoaderWithSource(PythonLoader):\n def load_section(self, section):\n super().load_section(section)\n obj = section.loader_context[\"obj\"]\n if callable(obj):\n scope = section.loader_context[\"scope\"]\n section.title += callable_to_source_link(obj, scope)\n\n\nwith open(\"apidocs.yml\", \"r\") as stream:\n api_structure = yaml.safe_load(stream)\n\n# Build the index and document structure first, we load the actual\n# docstrings at a later point.\nprint(\"Building index...\")\nindex = Index()\n\n\ndef add_sections(doc, object_names, depth=1):\n if isinstance(object_names, list):\n [add_sections(doc, x, depth) for x in object_names]\n elif isinstance(object_names, dict):\n for key, subsections in object_names.items():\n add_sections(doc, key, depth)\n add_sections(doc, subsections, depth + 1)\n elif isinstance(object_names, str):\n # Check how many levels of recursion we should be going.\n expand_depth = len(object_names)\n object_names = object_names.rstrip(\"+\")\n expand_depth -= len(object_names)\n\n def create_sections(name, level):\n if level > expand_depth:\n return\n index.new_section(doc, name, depth=depth + level, header_type=\"markdown\")\n for sub in dir_object(name, \"line\", False):\n sub = name + \".\" + sub\n create_sections(sub, level + 1)\n\n create_sections(object_names, 0)\n else:\n raise RuntimeError(object_names)\n\n\n# Make sure that we can find modules from the current working directory,\n# and have them take precedence over installed modules.\nsys.path.insert(0, \".\")\n\nfor pages in api_structure:\n for fname, object_names in pages.items():\n doc = index.new_document(fname)\n add_sections(doc, object_names)\n\nloader = PythonLoaderWithSource({})\npreproc = Preprocessor({})\n\npreproc.link_lookup = {}\nfor file, doc in index.documents.items():\n for section in doc.sections:\n preproc.link_lookup[section.identifier] = file\n# Load the docstrings and fill the sections.\nprint(\"Started generating documentation...\")\nfor doc in index.documents.values():\n for section in filter(lambda s: s.identifier, doc.sections):\n loader.load_section(section)\n preproc.preprocess_section(section)\n\n# Write out all the generated documents.\nos.makedirs(os.path.join(\"docs\", \"api\"), exist_ok=True)\nfor fname, doc in index.documents.items():\n with 
open(os.path.join(\"docs\", \"api\", fname), \"w\") as fp:\n for section in doc.sections:\n section.render(fp)\n", "path": "generate_api_docs.py"}]}
| 1,006 | 367 |
gh_patches_debug_39768
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4204
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
checkov skips all K8S standard policies if one or more custom policy is specified in --checks
**Description**
Using checkov to verify a kubernetes manifests (a single file with several objects: deployments, configmaps, etc) against a list of checks (so using the --check parameter), checkov verifies only the first check, and appears to skip all others checks in the provided list.
**Examples**
The [manifests are available here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s-manifest-yaml)
The [parameters available in the log](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-full_log_debug-log-L33)
**Version (please complete the following information):**
- Checkov Version 2.2.232
**Additional context**
The [full log, LOG_DEVEL=DEBUG, is available here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-full_log_debug-log)
The custom policies yaml files are available [here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s_pvc_gov01-yaml) and [here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s_sts_gov01-yaml)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/kubernetes/checks/resource/base_registry.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, TYPE_CHECKING
4
5 from checkov.common.checks.base_check_registry import BaseCheckRegistry
6
7 if TYPE_CHECKING:
8 from checkov.common.checks.base_check import BaseCheck
9 from checkov.common.typing import _SkippedCheck, _CheckResult
10 from checkov.runner_filter import RunnerFilter
11
12
13 class Registry(BaseCheckRegistry):
14 def __init__(self, report_type: str) -> None:
15 super().__init__(report_type)
16
17 def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, dict[str, Any]]: # type:ignore[override]
18 kind = entity.get("kind") or ""
19 conf = entity
20 return kind, conf
21
22 def scan(
23 self,
24 scanned_file: str,
25 entity: dict[str, Any],
26 skipped_checks: list[_SkippedCheck],
27 runner_filter: RunnerFilter,
28 report_type: str | None = None,
29 ) -> dict[BaseCheck, _CheckResult]:
30 (entity_type, entity_configuration) = self.extract_entity_details(entity)
31 results = {}
32 checks = self.get_checks(entity_type)
33 for check in checks:
34 skip_info: "_SkippedCheck" = {}
35 if skipped_checks:
36 if check.id in [x['id'] for x in skipped_checks]:
37 skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
38
39 if self._should_run_scan(check, entity_configuration, runner_filter, self.report_type):
40 self.logger.debug("Running check: {} on file {}".format(check.name, scanned_file))
41
42 result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,
43 entity_name=entity_type, entity_type=entity_type, skip_info=skip_info)
44 results[check] = result
45 return results
46
47 @staticmethod
48 def _should_run_scan(
49 check: BaseCheck, entity_configuration: dict[str, Any], runner_filter: RunnerFilter, report_type: str
50 ) -> bool:
51 check_id_allowlist = runner_filter.checks
52 check_id_denylist = runner_filter.skip_checks
53 if check_id_allowlist or runner_filter.check_threshold:
54 # Allow list provides namespace-only allows, check-only allows, or both
55 # If namespaces not specified, all namespaces are scanned
56 # If checks not specified, all checks are scanned
57 run_check = False
58 allowed_namespaces = [string for string in check_id_allowlist if ("CKV_" not in string and "BC_" not in string)]
59 if not any(("CKV_" in check or "BC_" in check) for check in check_id_allowlist) and not runner_filter.check_threshold:
60 if "metadata" in entity_configuration and "namespace" in entity_configuration["metadata"]:
61 if entity_configuration["metadata"]["namespace"] in allowed_namespaces:
62 run_check = True
63 elif "parent_metadata" in entity_configuration and "namespace" in entity_configuration["parent_metadata"]:
64 if entity_configuration["parent_metadata"]["namespace"] in allowed_namespaces:
65 run_check = True
66 else:
67 if "default" in allowed_namespaces:
68 run_check = True
69 else:
70 if runner_filter.should_run_check(check=check, report_type=report_type):
71 if allowed_namespaces:
72 # Check if namespace in allowed namespaces
73 if "metadata" in entity_configuration and "namespace" in entity_configuration["metadata"]:
74 if entity_configuration["metadata"]["namespace"] in allowed_namespaces:
75 run_check = True
76 elif "parent_metadata" in entity_configuration and "namespace" in entity_configuration["parent_metadata"]:
77 if entity_configuration["parent_metadata"]["namespace"] in allowed_namespaces:
78 run_check = True
79 else:
80 if "default" in allowed_namespaces:
81 run_check = True
82 else:
83 # No namespaces to filter
84 run_check = True
85 if run_check:
86 return True
87 elif check_id_denylist or runner_filter.skip_check_threshold or runner_filter.use_enforcement_rules:
88 namespace_skip = False
89 if "metadata" in entity_configuration and "namespace" in entity_configuration["metadata"]:
90 if entity_configuration["metadata"]["namespace"] in check_id_denylist:
91 namespace_skip = True
92 elif "parent_metadata" in entity_configuration and "namespace" in entity_configuration["parent_metadata"]:
93 if entity_configuration["parent_metadata"]["namespace"] in check_id_denylist:
94 namespace_skip = True
95 else:
96 if "default" in check_id_denylist:
97 namespace_skip = True
98 if runner_filter.should_run_check(check=check, report_type=report_type) and not namespace_skip:
99 return True
100 else:
101 return True
102 return False
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/kubernetes/checks/resource/base_registry.py b/checkov/kubernetes/checks/resource/base_registry.py
--- a/checkov/kubernetes/checks/resource/base_registry.py
+++ b/checkov/kubernetes/checks/resource/base_registry.py
@@ -54,35 +54,27 @@
# Allow list provides namespace-only allows, check-only allows, or both
# If namespaces not specified, all namespaces are scanned
# If checks not specified, all checks are scanned
- run_check = False
- allowed_namespaces = [string for string in check_id_allowlist if ("CKV_" not in string and "BC_" not in string)]
- if not any(("CKV_" in check or "BC_" in check) for check in check_id_allowlist) and not runner_filter.check_threshold:
+
+ if any("_" in check_id for check_id in check_id_allowlist) or runner_filter.check_threshold:
+ # a Kubernetes namespace can't have an '_' in its name,
+ # therefore we assume it is a built-in or custom check
+ if not runner_filter.should_run_check(check=check, report_type=report_type):
+ return False
+
+ allowed_namespaces = [check_id for check_id in check_id_allowlist if "_" not in check_id]
+ if allowed_namespaces:
+ # Check if namespace in allowed namespaces
if "metadata" in entity_configuration and "namespace" in entity_configuration["metadata"]:
if entity_configuration["metadata"]["namespace"] in allowed_namespaces:
- run_check = True
+ return True
elif "parent_metadata" in entity_configuration and "namespace" in entity_configuration["parent_metadata"]:
if entity_configuration["parent_metadata"]["namespace"] in allowed_namespaces:
- run_check = True
+ return True
else:
if "default" in allowed_namespaces:
- run_check = True
+ return True
else:
- if runner_filter.should_run_check(check=check, report_type=report_type):
- if allowed_namespaces:
- # Check if namespace in allowed namespaces
- if "metadata" in entity_configuration and "namespace" in entity_configuration["metadata"]:
- if entity_configuration["metadata"]["namespace"] in allowed_namespaces:
- run_check = True
- elif "parent_metadata" in entity_configuration and "namespace" in entity_configuration["parent_metadata"]:
- if entity_configuration["parent_metadata"]["namespace"] in allowed_namespaces:
- run_check = True
- else:
- if "default" in allowed_namespaces:
- run_check = True
- else:
- # No namespaces to filter
- run_check = True
- if run_check:
+ # No namespaces to filter
return True
elif check_id_denylist or runner_filter.skip_check_threshold or runner_filter.use_enforcement_rules:
namespace_skip = False
|
{"golden_diff": "diff --git a/checkov/kubernetes/checks/resource/base_registry.py b/checkov/kubernetes/checks/resource/base_registry.py\n--- a/checkov/kubernetes/checks/resource/base_registry.py\n+++ b/checkov/kubernetes/checks/resource/base_registry.py\n@@ -54,35 +54,27 @@\n # Allow list provides namespace-only allows, check-only allows, or both\n # If namespaces not specified, all namespaces are scanned\n # If checks not specified, all checks are scanned\n- run_check = False\n- allowed_namespaces = [string for string in check_id_allowlist if (\"CKV_\" not in string and \"BC_\" not in string)]\n- if not any((\"CKV_\" in check or \"BC_\" in check) for check in check_id_allowlist) and not runner_filter.check_threshold:\n+\n+ if any(\"_\" in check_id for check_id in check_id_allowlist) or runner_filter.check_threshold:\n+ # a Kubernetes namespace can't have an '_' in its name,\n+ # therefore we assume it is a built-in or custom check\n+ if not runner_filter.should_run_check(check=check, report_type=report_type):\n+ return False\n+\n+ allowed_namespaces = [check_id for check_id in check_id_allowlist if \"_\" not in check_id]\n+ if allowed_namespaces:\n+ # Check if namespace in allowed namespaces\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in allowed_namespaces:\n- run_check = True\n+ return True\n elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in allowed_namespaces:\n- run_check = True\n+ return True\n else:\n if \"default\" in allowed_namespaces:\n- run_check = True\n+ return True\n else:\n- if runner_filter.should_run_check(check=check, report_type=report_type):\n- if allowed_namespaces:\n- # Check if namespace in allowed namespaces\n- if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n- if entity_configuration[\"metadata\"][\"namespace\"] in allowed_namespaces:\n- run_check = True\n- elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n- if entity_configuration[\"parent_metadata\"][\"namespace\"] in allowed_namespaces:\n- run_check = True\n- else:\n- if \"default\" in allowed_namespaces:\n- run_check = True\n- else:\n- # No namespaces to filter\n- run_check = True\n- if run_check:\n+ # No namespaces to filter\n return True\n elif check_id_denylist or runner_filter.skip_check_threshold or runner_filter.use_enforcement_rules:\n namespace_skip = False\n", "issue": "checkov skips all K8S standard policies if one or more custom policy is specified in --checks\n**Description**\r\nUsing checkov to verify a kubernetes manifests (a single file with several objects: deployments, configmaps, etc) against a list of checks (so using the --check parameter), checkov verifies only the first check, and appears to skip all others checks in the provided list.\r\n\r\n**Examples**\r\nThe [manifests are available here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s-manifest-yaml)\r\nThe [parameters available in the log](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-full_log_debug-log-L33)\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.232\r\n\r\n**Additional context**\r\nThe [full log, LOG_DEVEL=DEBUG, is available 
here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-full_log_debug-log)\r\nThe custom policies yaml files are available [here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s_pvc_gov01-yaml) and [here](https://gist.github.com/previ/cf193061c767f18be7616dd52739adb0#file-k8s_sts_gov01-yaml)\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, TYPE_CHECKING\n\nfrom checkov.common.checks.base_check_registry import BaseCheckRegistry\n\nif TYPE_CHECKING:\n from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _SkippedCheck, _CheckResult\n from checkov.runner_filter import RunnerFilter\n\n\nclass Registry(BaseCheckRegistry):\n def __init__(self, report_type: str) -> None:\n super().__init__(report_type)\n\n def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, dict[str, Any]]: # type:ignore[override]\n kind = entity.get(\"kind\") or \"\"\n conf = entity\n return kind, conf\n\n def scan(\n self,\n scanned_file: str,\n entity: dict[str, Any],\n skipped_checks: list[_SkippedCheck],\n runner_filter: RunnerFilter,\n report_type: str | None = None,\n ) -> dict[BaseCheck, _CheckResult]:\n (entity_type, entity_configuration) = self.extract_entity_details(entity)\n results = {}\n checks = self.get_checks(entity_type)\n for check in checks:\n skip_info: \"_SkippedCheck\" = {}\n if skipped_checks:\n if check.id in [x['id'] for x in skipped_checks]:\n skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n\n if self._should_run_scan(check, entity_configuration, runner_filter, self.report_type):\n self.logger.debug(\"Running check: {} on file {}\".format(check.name, scanned_file))\n\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\n entity_name=entity_type, entity_type=entity_type, skip_info=skip_info)\n results[check] = result\n return results\n\n @staticmethod\n def _should_run_scan(\n check: BaseCheck, entity_configuration: dict[str, Any], runner_filter: RunnerFilter, report_type: str\n ) -> bool:\n check_id_allowlist = runner_filter.checks\n check_id_denylist = runner_filter.skip_checks\n if check_id_allowlist or runner_filter.check_threshold:\n # Allow list provides namespace-only allows, check-only allows, or both\n # If namespaces not specified, all namespaces are scanned\n # If checks not specified, all checks are scanned\n run_check = False\n allowed_namespaces = [string for string in check_id_allowlist if (\"CKV_\" not in string and \"BC_\" not in string)]\n if not any((\"CKV_\" in check or \"BC_\" in check) for check in check_id_allowlist) and not runner_filter.check_threshold:\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in allowed_namespaces:\n run_check = True\n elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in allowed_namespaces:\n run_check = True\n else:\n if \"default\" in allowed_namespaces:\n run_check = True\n else:\n if runner_filter.should_run_check(check=check, report_type=report_type):\n if allowed_namespaces:\n # Check if namespace in allowed namespaces\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in allowed_namespaces:\n run_check = True\n elif 
\"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in allowed_namespaces:\n run_check = True\n else:\n if \"default\" in allowed_namespaces:\n run_check = True\n else:\n # No namespaces to filter\n run_check = True\n if run_check:\n return True\n elif check_id_denylist or runner_filter.skip_check_threshold or runner_filter.use_enforcement_rules:\n namespace_skip = False\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in check_id_denylist:\n namespace_skip = True\n elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in check_id_denylist:\n namespace_skip = True\n else:\n if \"default\" in check_id_denylist:\n namespace_skip = True\n if runner_filter.should_run_check(check=check, report_type=report_type) and not namespace_skip:\n return True\n else:\n return True\n return False\n", "path": "checkov/kubernetes/checks/resource/base_registry.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, TYPE_CHECKING\n\nfrom checkov.common.checks.base_check_registry import BaseCheckRegistry\n\nif TYPE_CHECKING:\n from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _SkippedCheck, _CheckResult\n from checkov.runner_filter import RunnerFilter\n\n\nclass Registry(BaseCheckRegistry):\n def __init__(self, report_type: str) -> None:\n super().__init__(report_type)\n\n def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, dict[str, Any]]: # type:ignore[override]\n kind = entity.get(\"kind\") or \"\"\n conf = entity\n return kind, conf\n\n def scan(\n self,\n scanned_file: str,\n entity: dict[str, Any],\n skipped_checks: list[_SkippedCheck],\n runner_filter: RunnerFilter,\n report_type: str | None = None,\n ) -> dict[BaseCheck, _CheckResult]:\n (entity_type, entity_configuration) = self.extract_entity_details(entity)\n results = {}\n checks = self.get_checks(entity_type)\n for check in checks:\n skip_info: \"_SkippedCheck\" = {}\n if skipped_checks:\n if check.id in [x['id'] for x in skipped_checks]:\n skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n\n if self._should_run_scan(check, entity_configuration, runner_filter, self.report_type):\n self.logger.debug(\"Running check: {} on file {}\".format(check.name, scanned_file))\n\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\n entity_name=entity_type, entity_type=entity_type, skip_info=skip_info)\n results[check] = result\n return results\n\n @staticmethod\n def _should_run_scan(\n check: BaseCheck, entity_configuration: dict[str, Any], runner_filter: RunnerFilter, report_type: str\n ) -> bool:\n check_id_allowlist = runner_filter.checks\n check_id_denylist = runner_filter.skip_checks\n if check_id_allowlist or runner_filter.check_threshold:\n # Allow list provides namespace-only allows, check-only allows, or both\n # If namespaces not specified, all namespaces are scanned\n # If checks not specified, all checks are scanned\n\n if any(\"_\" in check_id for check_id in check_id_allowlist) or runner_filter.check_threshold:\n # a Kubernetes namespace can't have an '_' in its name,\n # therefore we assume it is a built-in or custom check\n if not 
runner_filter.should_run_check(check=check, report_type=report_type):\n return False\n\n allowed_namespaces = [check_id for check_id in check_id_allowlist if \"_\" not in check_id]\n if allowed_namespaces:\n # Check if namespace in allowed namespaces\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in allowed_namespaces:\n return True\n elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in allowed_namespaces:\n return True\n else:\n if \"default\" in allowed_namespaces:\n return True\n else:\n # No namespaces to filter\n return True\n elif check_id_denylist or runner_filter.skip_check_threshold or runner_filter.use_enforcement_rules:\n namespace_skip = False\n if \"metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"metadata\"]:\n if entity_configuration[\"metadata\"][\"namespace\"] in check_id_denylist:\n namespace_skip = True\n elif \"parent_metadata\" in entity_configuration and \"namespace\" in entity_configuration[\"parent_metadata\"]:\n if entity_configuration[\"parent_metadata\"][\"namespace\"] in check_id_denylist:\n namespace_skip = True\n else:\n if \"default\" in check_id_denylist:\n namespace_skip = True\n if runner_filter.should_run_check(check=check, report_type=report_type) and not namespace_skip:\n return True\n else:\n return True\n return False\n", "path": "checkov/kubernetes/checks/resource/base_registry.py"}]}
| 1,867 | 619 |
gh_patches_debug_38033
|
rasdani/github-patches
|
git_diff
|
google__clusterfuzz-1524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support authentication with Cloud IAP
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/appengine/libs/auth.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Authentication helpers."""
15
16 import collections
17
18 from firebase_admin import auth
19 from google.cloud import ndb
20 import webapp2
21
22 from base import utils
23 from config import local_config
24 from datastore import data_types
25 from metrics import logs
26 from system import environment
27
28 User = collections.namedtuple('User', ['email'])
29
30
31 class AuthError(Exception):
32 """Auth error."""
33
34
35 def auth_domain():
36 """Get the auth domain."""
37 domain = local_config.ProjectConfig().get('firebase.auth_domain')
38 if domain:
39 return domain
40
41 return utils.get_application_id() + '.firebaseapp.com'
42
43
44 def is_current_user_admin():
45 """Returns whether or not the current logged in user is an admin."""
46 if environment.is_local_development():
47 return True
48
49 user = get_current_user()
50 if not user:
51 return False
52
53 key = ndb.Key(data_types.Admin, user.email)
54 return bool(key.get())
55
56
57 def get_current_user():
58 """Get the current logged in user, or None."""
59 if environment.is_local_development():
60 return User('user@localhost')
61
62 loas_user = environment.get_value('LOAS_PEER_USERNAME')
63 if loas_user:
64 return User(loas_user + '@google.com')
65
66 current_request = get_current_request()
67 oauth_email = getattr(current_request, '_oauth_email', None)
68 if oauth_email:
69 return User(oauth_email)
70
71 cached_email = getattr(current_request, '_cached_email', None)
72 if cached_email:
73 return User(cached_email)
74
75 session_cookie = get_session_cookie()
76 if not session_cookie:
77 return None
78
79 try:
80 decoded_claims = decode_claims(get_session_cookie())
81 except AuthError:
82 logs.log_warn('Invalid session cookie.')
83 return None
84
85 if not decoded_claims.get('email_verified'):
86 return None
87
88 email = decoded_claims.get('email')
89 if not email:
90 return None
91
92 # We cache the email for this request if we've validated the user to make
93 # subsequent get_current_user() calls fast.
94 setattr(current_request, '_cached_email', email)
95 return User(email)
96
97
98 def create_session_cookie(id_token, expires_in):
99 """Create a new session cookie."""
100 try:
101 return auth.create_session_cookie(id_token, expires_in=expires_in)
102 except auth.AuthError:
103 raise AuthError('Failed to create session cookie.')
104
105
106 def get_current_request():
107 """Get the current request."""
108 return webapp2.get_request()
109
110
111 def get_session_cookie():
112 """Get the current session cookie."""
113 return get_current_request().cookies.get('session')
114
115
116 def revoke_session_cookie(session_cookie):
117 """Revoke a session cookie."""
118 decoded_claims = decode_claims(session_cookie)
119 auth.revoke_refresh_tokens(decoded_claims['sub'])
120
121
122 def decode_claims(session_cookie):
123 """Decode the claims for the current session cookie."""
124 try:
125 return auth.verify_session_cookie(session_cookie, check_revoked=True)
126 except (ValueError, auth.AuthError):
127 raise AuthError('Invalid session cookie.')
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/appengine/libs/auth.py b/src/appengine/libs/auth.py
--- a/src/appengine/libs/auth.py
+++ b/src/appengine/libs/auth.py
@@ -13,12 +13,17 @@
# limitations under the License.
"""Authentication helpers."""
+from builtins import str
import collections
+import jwt
from firebase_admin import auth
from google.cloud import ndb
+from googleapiclient.discovery import build
+import requests
import webapp2
+from base import memoize
from base import utils
from config import local_config
from datastore import data_types
@@ -54,6 +59,68 @@
return bool(key.get())
[email protected](memoize.FifoInMemory(1))
+def _project_number_from_id(project_id):
+ """Get the project number from project ID."""
+ resource_manager = build('cloudresourcemanager', 'v1')
+ result = resource_manager.projects().get(projectId=project_id).execute()
+ if 'projectNumber' not in result:
+ raise AuthError('Failed to get project number.')
+
+ return result['projectNumber']
+
+
[email protected](memoize.FifoInMemory(1))
+def _get_iap_key(key_id):
+ """Retrieves a public key from the list published by Identity-Aware Proxy,
+ re-fetching the key file if necessary.
+ """
+ resp = requests.get('https://www.gstatic.com/iap/verify/public_key')
+ if resp.status_code != 200:
+ raise AuthError('Unable to fetch IAP keys: {} / {} / {}'.format(
+ resp.status_code, resp.headers, resp.text))
+
+ result = resp.json()
+ key = result.get(key_id)
+ if not key:
+ raise AuthError('Key {!r} not found'.format(key_id))
+
+ return key
+
+
+def _validate_iap_jwt(iap_jwt):
+ """Validate JWT assertion."""
+ project_id = utils.get_application_id()
+ expected_audience = '/projects/{}/apps/{}'.format(
+ _project_number_from_id(project_id), project_id)
+
+ try:
+ key_id = jwt.get_unverified_header(iap_jwt).get('kid')
+ if not key_id:
+ raise AuthError('No key ID.')
+
+ key = _get_iap_key(key_id)
+ decoded_jwt = jwt.decode(
+ iap_jwt,
+ key,
+ algorithms=['ES256'],
+ issuer='https://cloud.google.com/iap',
+ audience=expected_audience)
+ return decoded_jwt['email']
+ except (jwt.exceptions.InvalidTokenError,
+ requests.exceptions.RequestException) as e:
+ raise AuthError('JWT assertion decode error: ' + str(e))
+
+
+def get_iap_email(current_request):
+ """Get Cloud IAP email."""
+ jwt_assertion = current_request.headers.get('X-Goog-IAP-JWT-Assertion')
+ if not jwt_assertion:
+ return None
+
+ return _validate_iap_jwt(jwt_assertion)
+
+
def get_current_user():
"""Get the current logged in user, or None."""
if environment.is_local_development():
@@ -64,6 +131,10 @@
return User(loas_user + '@google.com')
current_request = get_current_request()
+ iap_email = get_iap_email(current_request)
+ if iap_email:
+ return User(iap_email)
+
oauth_email = getattr(current_request, '_oauth_email', None)
if oauth_email:
return User(oauth_email)
|
{"golden_diff": "diff --git a/src/appengine/libs/auth.py b/src/appengine/libs/auth.py\n--- a/src/appengine/libs/auth.py\n+++ b/src/appengine/libs/auth.py\n@@ -13,12 +13,17 @@\n # limitations under the License.\n \"\"\"Authentication helpers.\"\"\"\n \n+from builtins import str\n import collections\n+import jwt\n \n from firebase_admin import auth\n from google.cloud import ndb\n+from googleapiclient.discovery import build\n+import requests\n import webapp2\n \n+from base import memoize\n from base import utils\n from config import local_config\n from datastore import data_types\n@@ -54,6 +59,68 @@\n return bool(key.get())\n \n \[email protected](memoize.FifoInMemory(1))\n+def _project_number_from_id(project_id):\n+ \"\"\"Get the project number from project ID.\"\"\"\n+ resource_manager = build('cloudresourcemanager', 'v1')\n+ result = resource_manager.projects().get(projectId=project_id).execute()\n+ if 'projectNumber' not in result:\n+ raise AuthError('Failed to get project number.')\n+\n+ return result['projectNumber']\n+\n+\[email protected](memoize.FifoInMemory(1))\n+def _get_iap_key(key_id):\n+ \"\"\"Retrieves a public key from the list published by Identity-Aware Proxy,\n+ re-fetching the key file if necessary.\n+ \"\"\"\n+ resp = requests.get('https://www.gstatic.com/iap/verify/public_key')\n+ if resp.status_code != 200:\n+ raise AuthError('Unable to fetch IAP keys: {} / {} / {}'.format(\n+ resp.status_code, resp.headers, resp.text))\n+\n+ result = resp.json()\n+ key = result.get(key_id)\n+ if not key:\n+ raise AuthError('Key {!r} not found'.format(key_id))\n+\n+ return key\n+\n+\n+def _validate_iap_jwt(iap_jwt):\n+ \"\"\"Validate JWT assertion.\"\"\"\n+ project_id = utils.get_application_id()\n+ expected_audience = '/projects/{}/apps/{}'.format(\n+ _project_number_from_id(project_id), project_id)\n+\n+ try:\n+ key_id = jwt.get_unverified_header(iap_jwt).get('kid')\n+ if not key_id:\n+ raise AuthError('No key ID.')\n+\n+ key = _get_iap_key(key_id)\n+ decoded_jwt = jwt.decode(\n+ iap_jwt,\n+ key,\n+ algorithms=['ES256'],\n+ issuer='https://cloud.google.com/iap',\n+ audience=expected_audience)\n+ return decoded_jwt['email']\n+ except (jwt.exceptions.InvalidTokenError,\n+ requests.exceptions.RequestException) as e:\n+ raise AuthError('JWT assertion decode error: ' + str(e))\n+\n+\n+def get_iap_email(current_request):\n+ \"\"\"Get Cloud IAP email.\"\"\"\n+ jwt_assertion = current_request.headers.get('X-Goog-IAP-JWT-Assertion')\n+ if not jwt_assertion:\n+ return None\n+\n+ return _validate_iap_jwt(jwt_assertion)\n+\n+\n def get_current_user():\n \"\"\"Get the current logged in user, or None.\"\"\"\n if environment.is_local_development():\n@@ -64,6 +131,10 @@\n return User(loas_user + '@google.com')\n \n current_request = get_current_request()\n+ iap_email = get_iap_email(current_request)\n+ if iap_email:\n+ return User(iap_email)\n+\n oauth_email = getattr(current_request, '_oauth_email', None)\n if oauth_email:\n return User(oauth_email)\n", "issue": "Support authentication with Cloud IAP\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Authentication helpers.\"\"\"\n\nimport collections\n\nfrom firebase_admin import auth\nfrom google.cloud import ndb\nimport webapp2\n\nfrom base import utils\nfrom config import local_config\nfrom datastore import data_types\nfrom metrics import logs\nfrom system import environment\n\nUser = collections.namedtuple('User', ['email'])\n\n\nclass AuthError(Exception):\n \"\"\"Auth error.\"\"\"\n\n\ndef auth_domain():\n \"\"\"Get the auth domain.\"\"\"\n domain = local_config.ProjectConfig().get('firebase.auth_domain')\n if domain:\n return domain\n\n return utils.get_application_id() + '.firebaseapp.com'\n\n\ndef is_current_user_admin():\n \"\"\"Returns whether or not the current logged in user is an admin.\"\"\"\n if environment.is_local_development():\n return True\n\n user = get_current_user()\n if not user:\n return False\n\n key = ndb.Key(data_types.Admin, user.email)\n return bool(key.get())\n\n\ndef get_current_user():\n \"\"\"Get the current logged in user, or None.\"\"\"\n if environment.is_local_development():\n return User('user@localhost')\n\n loas_user = environment.get_value('LOAS_PEER_USERNAME')\n if loas_user:\n return User(loas_user + '@google.com')\n\n current_request = get_current_request()\n oauth_email = getattr(current_request, '_oauth_email', None)\n if oauth_email:\n return User(oauth_email)\n\n cached_email = getattr(current_request, '_cached_email', None)\n if cached_email:\n return User(cached_email)\n\n session_cookie = get_session_cookie()\n if not session_cookie:\n return None\n\n try:\n decoded_claims = decode_claims(get_session_cookie())\n except AuthError:\n logs.log_warn('Invalid session cookie.')\n return None\n\n if not decoded_claims.get('email_verified'):\n return None\n\n email = decoded_claims.get('email')\n if not email:\n return None\n\n # We cache the email for this request if we've validated the user to make\n # subsequent get_current_user() calls fast.\n setattr(current_request, '_cached_email', email)\n return User(email)\n\n\ndef create_session_cookie(id_token, expires_in):\n \"\"\"Create a new session cookie.\"\"\"\n try:\n return auth.create_session_cookie(id_token, expires_in=expires_in)\n except auth.AuthError:\n raise AuthError('Failed to create session cookie.')\n\n\ndef get_current_request():\n \"\"\"Get the current request.\"\"\"\n return webapp2.get_request()\n\n\ndef get_session_cookie():\n \"\"\"Get the current session cookie.\"\"\"\n return get_current_request().cookies.get('session')\n\n\ndef revoke_session_cookie(session_cookie):\n \"\"\"Revoke a session cookie.\"\"\"\n decoded_claims = decode_claims(session_cookie)\n auth.revoke_refresh_tokens(decoded_claims['sub'])\n\n\ndef decode_claims(session_cookie):\n \"\"\"Decode the claims for the current session cookie.\"\"\"\n try:\n return auth.verify_session_cookie(session_cookie, check_revoked=True)\n except (ValueError, auth.AuthError):\n raise AuthError('Invalid session cookie.')\n", "path": "src/appengine/libs/auth.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Authentication helpers.\"\"\"\n\nfrom builtins import str\nimport collections\nimport jwt\n\nfrom firebase_admin import auth\nfrom google.cloud import ndb\nfrom googleapiclient.discovery import build\nimport requests\nimport webapp2\n\nfrom base import memoize\nfrom base import utils\nfrom config import local_config\nfrom datastore import data_types\nfrom metrics import logs\nfrom system import environment\n\nUser = collections.namedtuple('User', ['email'])\n\n\nclass AuthError(Exception):\n \"\"\"Auth error.\"\"\"\n\n\ndef auth_domain():\n \"\"\"Get the auth domain.\"\"\"\n domain = local_config.ProjectConfig().get('firebase.auth_domain')\n if domain:\n return domain\n\n return utils.get_application_id() + '.firebaseapp.com'\n\n\ndef is_current_user_admin():\n \"\"\"Returns whether or not the current logged in user is an admin.\"\"\"\n if environment.is_local_development():\n return True\n\n user = get_current_user()\n if not user:\n return False\n\n key = ndb.Key(data_types.Admin, user.email)\n return bool(key.get())\n\n\[email protected](memoize.FifoInMemory(1))\ndef _project_number_from_id(project_id):\n \"\"\"Get the project number from project ID.\"\"\"\n resource_manager = build('cloudresourcemanager', 'v1')\n result = resource_manager.projects().get(projectId=project_id).execute()\n if 'projectNumber' not in result:\n raise AuthError('Failed to get project number.')\n\n return result['projectNumber']\n\n\[email protected](memoize.FifoInMemory(1))\ndef _get_iap_key(key_id):\n \"\"\"Retrieves a public key from the list published by Identity-Aware Proxy,\n re-fetching the key file if necessary.\n \"\"\"\n resp = requests.get('https://www.gstatic.com/iap/verify/public_key')\n if resp.status_code != 200:\n raise AuthError('Unable to fetch IAP keys: {} / {} / {}'.format(\n resp.status_code, resp.headers, resp.text))\n\n result = resp.json()\n key = result.get(key_id)\n if not key:\n raise AuthError('Key {!r} not found'.format(key_id))\n\n return key\n\n\ndef _validate_iap_jwt(iap_jwt):\n \"\"\"Validate JWT assertion.\"\"\"\n project_id = utils.get_application_id()\n expected_audience = '/projects/{}/apps/{}'.format(\n _project_number_from_id(project_id), project_id)\n\n try:\n key_id = jwt.get_unverified_header(iap_jwt).get('kid')\n if not key_id:\n raise AuthError('No key ID.')\n\n key = _get_iap_key(key_id)\n decoded_jwt = jwt.decode(\n iap_jwt,\n key,\n algorithms=['ES256'],\n issuer='https://cloud.google.com/iap',\n audience=expected_audience)\n return decoded_jwt['email']\n except (jwt.exceptions.InvalidTokenError,\n requests.exceptions.RequestException) as e:\n raise AuthError('JWT assertion decode error: ' + str(e))\n\n\ndef get_iap_email(current_request):\n \"\"\"Get Cloud IAP email.\"\"\"\n jwt_assertion = current_request.headers.get('X-Goog-IAP-JWT-Assertion')\n if not jwt_assertion:\n return None\n\n return _validate_iap_jwt(jwt_assertion)\n\n\ndef get_current_user():\n \"\"\"Get the current logged in user, or None.\"\"\"\n if environment.is_local_development():\n return User('user@localhost')\n\n loas_user = environment.get_value('LOAS_PEER_USERNAME')\n if loas_user:\n return User(loas_user + '@google.com')\n\n current_request = get_current_request()\n iap_email = get_iap_email(current_request)\n if iap_email:\n return User(iap_email)\n\n oauth_email = getattr(current_request, '_oauth_email', None)\n 
if oauth_email:\n return User(oauth_email)\n\n cached_email = getattr(current_request, '_cached_email', None)\n if cached_email:\n return User(cached_email)\n\n session_cookie = get_session_cookie()\n if not session_cookie:\n return None\n\n try:\n decoded_claims = decode_claims(get_session_cookie())\n except AuthError:\n logs.log_warn('Invalid session cookie.')\n return None\n\n if not decoded_claims.get('email_verified'):\n return None\n\n email = decoded_claims.get('email')\n if not email:\n return None\n\n # We cache the email for this request if we've validated the user to make\n # subsequent get_current_user() calls fast.\n setattr(current_request, '_cached_email', email)\n return User(email)\n\n\ndef create_session_cookie(id_token, expires_in):\n \"\"\"Create a new session cookie.\"\"\"\n try:\n return auth.create_session_cookie(id_token, expires_in=expires_in)\n except auth.AuthError:\n raise AuthError('Failed to create session cookie.')\n\n\ndef get_current_request():\n \"\"\"Get the current request.\"\"\"\n return webapp2.get_request()\n\n\ndef get_session_cookie():\n \"\"\"Get the current session cookie.\"\"\"\n return get_current_request().cookies.get('session')\n\n\ndef revoke_session_cookie(session_cookie):\n \"\"\"Revoke a session cookie.\"\"\"\n decoded_claims = decode_claims(session_cookie)\n auth.revoke_refresh_tokens(decoded_claims['sub'])\n\n\ndef decode_claims(session_cookie):\n \"\"\"Decode the claims for the current session cookie.\"\"\"\n try:\n return auth.verify_session_cookie(session_cookie, check_revoked=True)\n except (ValueError, auth.AuthError):\n raise AuthError('Invalid session cookie.')\n", "path": "src/appengine/libs/auth.py"}]}
| 1,335 | 810 |
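The `auth.py` patch in the record above centers on validating Cloud IAP JWT assertions. Below is a minimal standalone sketch of that flow. It mirrors the patched code, but `project_number` and `project_id` are caller-supplied placeholders here (the real patch derives the number via the Cloud Resource Manager API), and it assumes PyJWT with the `cryptography` backend so PEM-encoded ES256 keys are accepted.

```python
import jwt       # PyJWT; the 'cryptography' extra is needed for ES256
import requests

IAP_KEYS_URL = 'https://www.gstatic.com/iap/verify/public_key'

def validate_iap_jwt(iap_jwt, project_number, project_id):
    """Return the verified email claim from a Cloud IAP JWT assertion."""
    expected_audience = '/projects/{}/apps/{}'.format(project_number, project_id)

    key_id = jwt.get_unverified_header(iap_jwt).get('kid')
    if not key_id:
        raise ValueError('No key ID in the JWT header.')

    # The published key file maps key ids to PEM-encoded ES256 public keys.
    keys = requests.get(IAP_KEYS_URL).json()
    key = keys.get(key_id)
    if not key:
        raise ValueError('Key {!r} not found.'.format(key_id))

    decoded = jwt.decode(
        iap_jwt, key, algorithms=['ES256'],
        issuer='https://cloud.google.com/iap',
        audience=expected_audience)
    return decoded['email']
```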
gh_patches_debug_25819
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-4312
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bigo] Extractor returning invalid parameters
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.06.22.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Description
As of about 3 weeks ago, I now receive the following error on all live streams: `Bigo says: paramters invalid (code 1)`
### Verbose log
```shell
$ yt-dlp -vU -g https://www.bigo.tv/841947363
[debug] Command-line config: ['-vU', '-g', 'https://www.bigo.tv/841947363']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.06.22.1 [a86e01e]
[debug] Python version 3.10.4 (CPython 64bit) - macOS-12.4-arm64-arm-64bit
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg 5.0.1 (setts), ffprobe 5.0.1
[debug] Optional libraries: Cryptodome-3.14.1, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.06.22.1, Current version: 2022.06.22.1
yt-dlp is up to date (2022.06.22.1)
[debug] [Bigo] Extracting URL: https://www.bigo.tv/841947363
[Bigo] 841947363: Downloading JSON metadata
ERROR: [Bigo] 841947363: Bigo says: paramters invalid (code 1)
File "/opt/homebrew/Cellar/yt-dlp/2022.6.22.1/libexec/lib/python3.10/site-packages/yt_dlp/extractor/common.py", line 647, in extract
ie_result = self._real_extract(url)
File "/opt/homebrew/Cellar/yt-dlp/2022.6.22.1/libexec/lib/python3.10/site-packages/yt_dlp/extractor/bigo.py", line 37, in _real_extract
raise ExtractorError(
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/bigo.py`
Content:
```
1 from .common import InfoExtractor
2 from ..utils import ExtractorError, urlencode_postdata
3
4
5 class BigoIE(InfoExtractor):
6 _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'
7
8 _TESTS = [{
9 'url': 'https://www.bigo.tv/ja/221338632',
10 'info_dict': {
11 'id': '6576287577575737440',
12 'title': '土よ〜💁♂️ 休憩室/REST room',
13 'thumbnail': r're:https?://.+',
14 'uploader': '✨Shin💫',
15 'uploader_id': '221338632',
16 'is_live': True,
17 },
18 'skip': 'livestream',
19 }, {
20 'url': 'https://www.bigo.tv/th/Tarlerm1304',
21 'only_matching': True,
22 }, {
23 'url': 'https://bigo.tv/115976881',
24 'only_matching': True,
25 }]
26
27 def _real_extract(self, url):
28 user_id = self._match_id(url)
29
30 info_raw = self._download_json(
31 'https://bigo.tv/studio/getInternalStudioInfo',
32 user_id, data=urlencode_postdata({'siteId': user_id}))
33
34 if not isinstance(info_raw, dict):
35 raise ExtractorError('Received invalid JSON data')
36 if info_raw.get('code'):
37 raise ExtractorError(
38 'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)
39 info = info_raw.get('data') or {}
40
41 if not info.get('alive'):
42 raise ExtractorError('This user is offline.', expected=True)
43
44 return {
45 'id': info.get('roomId') or user_id,
46 'title': info.get('roomTopic') or info.get('nick_name') or user_id,
47 'formats': [{
48 'url': info.get('hls_src'),
49 'ext': 'mp4',
50 'protocol': 'm3u8',
51 }],
52 'thumbnail': info.get('snapshot'),
53 'uploader': info.get('nick_name'),
54 'uploader_id': user_id,
55 'is_live': True,
56 }
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/bigo.py b/yt_dlp/extractor/bigo.py
--- a/yt_dlp/extractor/bigo.py
+++ b/yt_dlp/extractor/bigo.py
@@ -28,7 +28,7 @@
user_id = self._match_id(url)
info_raw = self._download_json(
- 'https://bigo.tv/studio/getInternalStudioInfo',
+ 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',
user_id, data=urlencode_postdata({'siteId': user_id}))
if not isinstance(info_raw, dict):
@@ -41,14 +41,14 @@
if not info.get('alive'):
raise ExtractorError('This user is offline.', expected=True)
+ formats, subs = self._extract_m3u8_formats_and_subtitles(
+ info.get('hls_src'), user_id, 'mp4', 'm3u8')
+
return {
'id': info.get('roomId') or user_id,
'title': info.get('roomTopic') or info.get('nick_name') or user_id,
- 'formats': [{
- 'url': info.get('hls_src'),
- 'ext': 'mp4',
- 'protocol': 'm3u8',
- }],
+ 'formats': formats,
+ 'subtitles': subs,
'thumbnail': info.get('snapshot'),
'uploader': info.get('nick_name'),
'uploader_id': user_id,
|
{"golden_diff": "diff --git a/yt_dlp/extractor/bigo.py b/yt_dlp/extractor/bigo.py\n--- a/yt_dlp/extractor/bigo.py\n+++ b/yt_dlp/extractor/bigo.py\n@@ -28,7 +28,7 @@\n user_id = self._match_id(url)\n \n info_raw = self._download_json(\n- 'https://bigo.tv/studio/getInternalStudioInfo',\n+ 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',\n user_id, data=urlencode_postdata({'siteId': user_id}))\n \n if not isinstance(info_raw, dict):\n@@ -41,14 +41,14 @@\n if not info.get('alive'):\n raise ExtractorError('This user is offline.', expected=True)\n \n+ formats, subs = self._extract_m3u8_formats_and_subtitles(\n+ info.get('hls_src'), user_id, 'mp4', 'm3u8')\n+\n return {\n 'id': info.get('roomId') or user_id,\n 'title': info.get('roomTopic') or info.get('nick_name') or user_id,\n- 'formats': [{\n- 'url': info.get('hls_src'),\n- 'ext': 'mp4',\n- 'protocol': 'm3u8',\n- }],\n+ 'formats': formats,\n+ 'subtitles': subs,\n 'thumbnail': info.get('snapshot'),\n 'uploader': info.get('nick_name'),\n 'uploader_id': user_id,\n", "issue": "[bigo] Extractor returning invalid parameters\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.06.22.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\n_No response_\n\n### Description\n\nAs of about 3 weeks ago, I now receive the following error on all live streams: `Bigo says: paramters invalid (code 1)`\n\n### Verbose log\n\n```shell\n$ yt-dlp -vU -g https://www.bigo.tv/841947363\r\n[debug] Command-line config: ['-vU', '-g', 'https://www.bigo.tv/841947363']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version 2022.06.22.1 [a86e01e]\r\n[debug] Python version 3.10.4 (CPython 64bit) - macOS-12.4-arm64-arm-64bit\r\n[debug] Checking exe version: ffmpeg -bsfs\r\n[debug] Checking exe version: ffprobe -bsfs\r\n[debug] exe versions: ffmpeg 5.0.1 (setts), ffprobe 5.0.1\r\n[debug] Optional libraries: Cryptodome-3.14.1, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3\r\n[debug] Proxy map: {}\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nLatest version: 2022.06.22.1, Current version: 2022.06.22.1\r\nyt-dlp is up to date (2022.06.22.1)\r\n[debug] [Bigo] Extracting URL: https://www.bigo.tv/841947363\r\n[Bigo] 841947363: Downloading JSON metadata\r\nERROR: [Bigo] 841947363: Bigo says: paramters invalid (code 1)\r\n File \"/opt/homebrew/Cellar/yt-dlp/2022.6.22.1/libexec/lib/python3.10/site-packages/yt_dlp/extractor/common.py\", line 647, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/opt/homebrew/Cellar/yt-dlp/2022.6.22.1/libexec/lib/python3.10/site-packages/yt_dlp/extractor/bigo.py\", line 37, in _real_extract\r\n raise ExtractorError(\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import ExtractorError, urlencode_postdata\n\n\nclass BigoIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bigo\\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'\n\n _TESTS = [{\n 'url': 'https://www.bigo.tv/ja/221338632',\n 'info_dict': {\n 'id': '6576287577575737440',\n 'title': '\u571f\u3088\u301c\ud83d\udc81\u200d\u2642\ufe0f \u4f11\u61a9\u5ba4/REST room',\n 'thumbnail': r're:https?://.+',\n 'uploader': '\u2728Shin\ud83d\udcab',\n 'uploader_id': '221338632',\n 'is_live': True,\n },\n 'skip': 'livestream',\n }, {\n 'url': 'https://www.bigo.tv/th/Tarlerm1304',\n 'only_matching': True,\n }, {\n 'url': 'https://bigo.tv/115976881',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n user_id = self._match_id(url)\n\n info_raw = self._download_json(\n 'https://bigo.tv/studio/getInternalStudioInfo',\n user_id, data=urlencode_postdata({'siteId': user_id}))\n\n if not isinstance(info_raw, dict):\n raise ExtractorError('Received invalid JSON data')\n if info_raw.get('code'):\n raise ExtractorError(\n 'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)\n info = info_raw.get('data') or {}\n\n if not info.get('alive'):\n raise ExtractorError('This user is offline.', expected=True)\n\n return {\n 'id': info.get('roomId') or user_id,\n 'title': info.get('roomTopic') or info.get('nick_name') or user_id,\n 'formats': [{\n 'url': info.get('hls_src'),\n 'ext': 'mp4',\n 'protocol': 'm3u8',\n }],\n 'thumbnail': info.get('snapshot'),\n 'uploader': info.get('nick_name'),\n 
'uploader_id': user_id,\n 'is_live': True,\n }\n", "path": "yt_dlp/extractor/bigo.py"}], "after_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import ExtractorError, urlencode_postdata\n\n\nclass BigoIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bigo\\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'\n\n _TESTS = [{\n 'url': 'https://www.bigo.tv/ja/221338632',\n 'info_dict': {\n 'id': '6576287577575737440',\n 'title': '\u571f\u3088\u301c\ud83d\udc81\u200d\u2642\ufe0f \u4f11\u61a9\u5ba4/REST room',\n 'thumbnail': r're:https?://.+',\n 'uploader': '\u2728Shin\ud83d\udcab',\n 'uploader_id': '221338632',\n 'is_live': True,\n },\n 'skip': 'livestream',\n }, {\n 'url': 'https://www.bigo.tv/th/Tarlerm1304',\n 'only_matching': True,\n }, {\n 'url': 'https://bigo.tv/115976881',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n user_id = self._match_id(url)\n\n info_raw = self._download_json(\n 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',\n user_id, data=urlencode_postdata({'siteId': user_id}))\n\n if not isinstance(info_raw, dict):\n raise ExtractorError('Received invalid JSON data')\n if info_raw.get('code'):\n raise ExtractorError(\n 'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)\n info = info_raw.get('data') or {}\n\n if not info.get('alive'):\n raise ExtractorError('This user is offline.', expected=True)\n\n formats, subs = self._extract_m3u8_formats_and_subtitles(\n info.get('hls_src'), user_id, 'mp4', 'm3u8')\n\n return {\n 'id': info.get('roomId') or user_id,\n 'title': info.get('roomTopic') or info.get('nick_name') or user_id,\n 'formats': formats,\n 'subtitles': subs,\n 'thumbnail': info.get('snapshot'),\n 'uploader': info.get('nick_name'),\n 'uploader_id': user_id,\n 'is_live': True,\n }\n", "path": "yt_dlp/extractor/bigo.py"}]}
| 1,840 | 342 |
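The fix in the record above is twofold: the metadata endpoint moved to `ta.bigo.tv/official_website/...`, and format extraction now goes through `_extract_m3u8_formats_and_subtitles` instead of a hand-built format dict. A quick way to sanity-check the relocated endpoint outside yt-dlp is sketched below; `user_id` is any live room id, and the response shape is assumed to match what the extractor already handles.

```python
import requests

def fetch_bigo_studio_info(user_id):
    """POST to the relocated Bigo metadata endpoint and return its data dict."""
    resp = requests.post(
        'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',
        data={'siteId': user_id})
    info = resp.json()
    if info.get('code'):  # a non-zero code is an API-level rejection
        raise RuntimeError(
            'Bigo says: %s (code %s)' % (info.get('msg'), info.get('code')))
    return info.get('data') or {}

# e.g. fetch_bigo_studio_info('841947363').get('hls_src')
```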
gh_patches_debug_28600
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3822
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[beta][v20] Reading a notification does not invalidate the cache
Server: Beta
Version: v20-RC2/99bee1d
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Generate a notification.
2. Read it on the site.
3. Fetch the list of notifications through the API.
4. Unless the 15-minute cache timeout has already expired, the notification is still marked as unread in the API response.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/notification/api/views.py`
Content:
```
1 # coding: utf-8
2 from dry_rest_permissions.generics import DRYPermissions
3 from rest_framework import filters
4 from rest_framework.generics import ListAPIView
5 from rest_framework.permissions import IsAuthenticated
6 from rest_framework_extensions.cache.decorators import cache_response
7 from rest_framework_extensions.etag.decorators import etag
8 from rest_framework_extensions.key_constructor import bits
9 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
10
11 from zds.api.bits import DJRF3xPaginationKeyBit
12 from zds.notification.api.serializers import NotificationSerializer
13 from zds.notification.models import Notification
14
15
16 class PagingNotificationListKeyConstructor(DefaultKeyConstructor):
17 pagination = DJRF3xPaginationKeyBit()
18 search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])
19 list_sql_query = bits.ListSqlQueryKeyBit()
20 unique_view_id = bits.UniqueViewIdKeyBit()
21 user = bits.UserKeyBit()
22
23
24 class NotificationListAPI(ListAPIView):
25 """
26 List of notification.
27 """
28
29 filter_backends = (filters.SearchFilter, filters.OrderingFilter)
30 search_fields = ('title',)
31 ordering_fields = ('pubdate', 'title',)
32 list_key_func = PagingNotificationListKeyConstructor()
33 serializer_class = NotificationSerializer
34 permission_classes = (IsAuthenticated, DRYPermissions,)
35
36 @etag(list_key_func)
37 @cache_response(key_func=list_key_func)
38 def get(self, request, *args, **kwargs):
39 """
40 Lists all notifications of a user.
41 ---
42
43 parameters:
44 - name: Authorization
45 description: Bearer token to make an authenticated request.
46 required: true
47 paramType: header
48 - name: page
49 description: Restricts output to the given page number.
50 required: false
51 paramType: query
52 - name: page_size
53 description: Sets the number of notifications per page.
54 required: false
55 paramType: query
56 - name: search
57 description: Filters by title.
58 required: false
59 paramType: query
60 - name: ordering
61 description: Sorts the results. You can order by (-)pubdate or (-)title.
62 paramType: query
63 - name: type
64 description: Filters by notification type.
65 paramType: query
66 - name: subscription_type
67 description: Filters by subscription type.
68 paramType: query
69 - name: expand
70 description: Returns an object instead of an identifier representing the given field.
71 required: false
72 paramType: query
73 responseMessages:
74 - code: 401
75 message: Not Authenticated
76 - code: 404
77 message: Not Found
78 """
79 return self.list(request, *args, **kwargs)
80
81 def get_queryset(self):
82 queryset = Notification.objects.get_notifications_of(self.request.user)
83 subscription_type = self.request.query_params.get('subscription_type', None)
84 if subscription_type:
85 queryset = queryset.filter(subscription__content_type__model=subscription_type)
86 _type = self.request.query_params.get('type', None)
87 if _type:
88 queryset = queryset.filter(content_type__model=_type)
89 return queryset
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py
--- a/zds/notification/api/views.py
+++ b/zds/notification/api/views.py
@@ -1,4 +1,8 @@
# coding: utf-8
+import datetime
+from django.core.cache import cache
+from django.db.models.signals import post_delete
+from django.db.models.signals import post_save
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import filters
from rest_framework.generics import ListAPIView
@@ -8,7 +12,7 @@
from rest_framework_extensions.key_constructor import bits
from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
-from zds.api.bits import DJRF3xPaginationKeyBit
+from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit
from zds.notification.api.serializers import NotificationSerializer
from zds.notification.models import Notification
@@ -19,6 +23,15 @@
list_sql_query = bits.ListSqlQueryKeyBit()
unique_view_id = bits.UniqueViewIdKeyBit()
user = bits.UserKeyBit()
+ updated_at = UpdatedAtKeyBit('api_updated_notification')
+
+
+def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):
+ cache.set('api_updated_notification', datetime.datetime.utcnow())
+
+
+post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)
+post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)
class NotificationListAPI(ListAPIView):
|
{"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -1,4 +1,8 @@\n # coding: utf-8\n+import datetime\n+from django.core.cache import cache\n+from django.db.models.signals import post_delete\n+from django.db.models.signals import post_save\n from dry_rest_permissions.generics import DRYPermissions\n from rest_framework import filters\n from rest_framework.generics import ListAPIView\n@@ -8,7 +12,7 @@\n from rest_framework_extensions.key_constructor import bits\n from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n \n-from zds.api.bits import DJRF3xPaginationKeyBit\n+from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\n from zds.notification.api.serializers import NotificationSerializer\n from zds.notification.models import Notification\n \n@@ -19,6 +23,15 @@\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n+ updated_at = UpdatedAtKeyBit('api_updated_notification')\n+\n+\n+def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n+ cache.set('api_updated_notification', datetime.datetime.utcnow())\n+\n+\n+post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\n+post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n \n \n class NotificationListAPI(ListAPIView):\n", "issue": "[beta][v20] Lire une notification n'invalide pas le cache\nServeur : Beta\nVersion : v20-RC2/99bee1d\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. G\u00e9n\u00e9rez une notification.\n2. Lisez l\u00e0 depuis le site.\n3. R\u00e9cup\u00e9rez la liste des notifications par l'API.\n4. 
Si le timeout de 15 minutes n'est pas pass\u00e9 par l\u00e0, la notification est toujours marqu\u00e9e comme non lue dans la r\u00e9ponse de l'API.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('title',)\n ordering_fields = ('pubdate', 'title',)\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (IsAuthenticated, DRYPermissions,)\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. 
You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get('type', None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}], "after_files": [{"content": "# coding: utf-8\nimport datetime\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n updated_at = UpdatedAtKeyBit('api_updated_notification')\n\n\ndef change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n cache.set('api_updated_notification', datetime.datetime.utcnow())\n\n\npost_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\npost_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('title',)\n ordering_fields = ('pubdate', 'title',)\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (IsAuthenticated, DRYPermissions,)\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - 
name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get('type', None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}]}
| 1,246 | 332 |
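The patch in the record above fixes the stale API cache by mixing a model-level "last updated" timestamp into the cache/ETag key and bumping that timestamp on every write, so a read after a write can no longer hit the old entry. The pattern in isolation, as a sketch for any Django model and assuming a configured cache backend:

```python
import datetime

from django.core.cache import cache
from django.db.models.signals import post_delete, post_save

UPDATED_AT_KEY = 'api_updated_notification'

def bump_updated_at(sender=None, instance=None, *args, **kwargs):
    # Every save or delete moves the timestamp forward, so cache keys built
    # from it (here via UpdatedAtKeyBit) stop matching stale entries.
    cache.set(UPDATED_AT_KEY, datetime.datetime.utcnow())

# Wire it to the model whose writes must invalidate cached API responses:
# post_save.connect(bump_updated_at, sender=Notification)
# post_delete.connect(bump_updated_at, sender=Notification)
```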
gh_patches_debug_35738
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1682
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase streaming unit tests
Reach parity with the C# unit tests.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-streaming/botframework/streaming/receive_request.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List
5
6 from botframework.streaming.payloads import ContentStream
7
8
9 class ReceiveRequest:
10 def __init__(
11 self, *, verb: str = None, path: str = None, streams: List[ContentStream]
12 ):
13 self.verb = verb
14 self.path = path
15 self.streams: List[ContentStream] = streams or []
16
17 async def read_body_as_str(self) -> str:
18 try:
19 content_stream = self.streams[0] if self.streams else None
20
21 if not content_stream:
22 # TODO: maybe raise an error
23 return ""
24
25 # TODO: encoding double check
26 stream = await content_stream.stream.read_until_end()
27 return bytes(stream).decode("utf-8-sig")
28 except Exception as error:
29 raise error
30
```
Path: `libraries/botframework-streaming/botframework/streaming/streaming_response.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import json
5 from uuid import UUID, uuid4
6 from typing import List, Union
7
8 from msrest.serialization import Model
9 from botframework.streaming.payloads import ResponseMessageStream
10 from botframework.streaming.payloads.models import Serializable
11
12
13 class StreamingResponse:
14 def __init__(
15 self, *, status_code: int = None, streams: List[ResponseMessageStream] = None
16 ):
17 self.status_code = status_code
18 self.streams = streams
19
20 def add_stream(self, content: object, identifier: UUID = None):
21 if not content:
22 raise TypeError("content can't be None")
23
24 if self.streams is None:
25 self.streams: List[ResponseMessageStream] = []
26
27 self.streams.append(
28 ResponseMessageStream(id=identifier or uuid4(), content=content)
29 )
30
31 def set_body(self, body: Union[str, Serializable, Model]):
32 # TODO: verify if msrest.serialization.Model is necessary
33 if not body:
34 return
35
36 if isinstance(body, Serializable):
37 body = body.to_json()
38 elif isinstance(body, Model):
39 body = json.dumps(body.as_dict())
40
41 self.add_stream(list(body.encode()))
42
43 @staticmethod
44 def create_response(status_code: int, body: object) -> "StreamingResponse":
45 response = StreamingResponse(status_code=status_code)
46
47 if body:
48 response.add_stream(body)
49
50 return response
51
```
Path: `libraries/botframework-streaming/botframework/streaming/receive_response.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List, Union, Type
5
6 from msrest.serialization import Model
7 from botframework.streaming.payloads import ContentStream
8 from botframework.streaming.payloads.models import Serializable
9
10
11 class ReceiveResponse:
12 def __init__(self, status_code: int = None, streams: List[ContentStream] = None):
13 self.status_code = status_code
14 self.streams = streams
15
16 def read_body_as_json(
17 self, cls: Union[Type[Model], Type[Serializable]]
18 ) -> Union[Model, Serializable]:
19 try:
20 body_str = self.read_body_as_str()
21 body = None
22
23 if issubclass(cls, Serializable):
24 body = cls().from_json(body_str)
25 elif isinstance(cls, Model):
26 body = cls.deserialize(body_str)
27 return body
28 except Exception as error:
29 raise error
30
31 def read_body_as_str(self) -> str:
32 try:
33 content_stream = self.read_body()
34
35 if not content_stream:
36 return ""
37
38 # TODO: encoding double check
39 return content_stream.decode("utf8")
40 except Exception as error:
41 raise error
42
43 def read_body(self) -> bytes:
44 try:
45 content_stream = self.streams[0] if self.streams else None
46
47 if not content_stream:
48 return None
49
50 return bytes(content_stream.stream)
51 except Exception as error:
52 raise error
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botframework-streaming/botframework/streaming/receive_request.py b/libraries/botframework-streaming/botframework/streaming/receive_request.py
--- a/libraries/botframework-streaming/botframework/streaming/receive_request.py
+++ b/libraries/botframework-streaming/botframework/streaming/receive_request.py
@@ -8,7 +8,7 @@
class ReceiveRequest:
def __init__(
- self, *, verb: str = None, path: str = None, streams: List[ContentStream]
+ self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None
):
self.verb = verb
self.path = path
diff --git a/libraries/botframework-streaming/botframework/streaming/receive_response.py b/libraries/botframework-streaming/botframework/streaming/receive_response.py
--- a/libraries/botframework-streaming/botframework/streaming/receive_response.py
+++ b/libraries/botframework-streaming/botframework/streaming/receive_response.py
@@ -9,9 +9,9 @@
class ReceiveResponse:
- def __init__(self, status_code: int = None, streams: List[ContentStream] = None):
+ def __init__(self, status_code: int = 0, streams: List[ContentStream] = None):
self.status_code = status_code
- self.streams = streams
+ self.streams = streams or []
def read_body_as_json(
self, cls: Union[Type[Model], Type[Serializable]]
diff --git a/libraries/botframework-streaming/botframework/streaming/streaming_response.py b/libraries/botframework-streaming/botframework/streaming/streaming_response.py
--- a/libraries/botframework-streaming/botframework/streaming/streaming_response.py
+++ b/libraries/botframework-streaming/botframework/streaming/streaming_response.py
@@ -2,6 +2,7 @@
# Licensed under the MIT License.
import json
+from http import HTTPStatus
from uuid import UUID, uuid4
from typing import List, Union
@@ -12,7 +13,7 @@
class StreamingResponse:
def __init__(
- self, *, status_code: int = None, streams: List[ResponseMessageStream] = None
+ self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None
):
self.status_code = status_code
self.streams = streams
@@ -48,3 +49,20 @@
response.add_stream(body)
return response
+
+ @staticmethod
+ def not_found(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)
+
+ @staticmethod
+ def forbidden(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)
+
+ # pylint: disable=invalid-name
+ @staticmethod
+ def ok(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.OK, body)
+
+ @staticmethod
+ def internal_server_error(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)
|
{"golden_diff": "diff --git a/libraries/botframework-streaming/botframework/streaming/receive_request.py b/libraries/botframework-streaming/botframework/streaming/receive_request.py\n--- a/libraries/botframework-streaming/botframework/streaming/receive_request.py\n+++ b/libraries/botframework-streaming/botframework/streaming/receive_request.py\n@@ -8,7 +8,7 @@\n \n class ReceiveRequest:\n def __init__(\n- self, *, verb: str = None, path: str = None, streams: List[ContentStream]\n+ self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None\n ):\n self.verb = verb\n self.path = path\ndiff --git a/libraries/botframework-streaming/botframework/streaming/receive_response.py b/libraries/botframework-streaming/botframework/streaming/receive_response.py\n--- a/libraries/botframework-streaming/botframework/streaming/receive_response.py\n+++ b/libraries/botframework-streaming/botframework/streaming/receive_response.py\n@@ -9,9 +9,9 @@\n \n \n class ReceiveResponse:\n- def __init__(self, status_code: int = None, streams: List[ContentStream] = None):\n+ def __init__(self, status_code: int = 0, streams: List[ContentStream] = None):\n self.status_code = status_code\n- self.streams = streams\n+ self.streams = streams or []\n \n def read_body_as_json(\n self, cls: Union[Type[Model], Type[Serializable]]\ndiff --git a/libraries/botframework-streaming/botframework/streaming/streaming_response.py b/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n--- a/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n+++ b/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n@@ -2,6 +2,7 @@\n # Licensed under the MIT License.\n \n import json\n+from http import HTTPStatus\n from uuid import UUID, uuid4\n from typing import List, Union\n \n@@ -12,7 +13,7 @@\n \n class StreamingResponse:\n def __init__(\n- self, *, status_code: int = None, streams: List[ResponseMessageStream] = None\n+ self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None\n ):\n self.status_code = status_code\n self.streams = streams\n@@ -48,3 +49,20 @@\n response.add_stream(body)\n \n return response\n+\n+ @staticmethod\n+ def not_found(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)\n+\n+ @staticmethod\n+ def forbidden(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)\n+\n+ # pylint: disable=invalid-name\n+ @staticmethod\n+ def ok(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.OK, body)\n+\n+ @staticmethod\n+ def internal_server_error(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)\n", "issue": "Increase streaming unit tests\nreach parity with C# unit tests\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom botframework.streaming.payloads import ContentStream\n\n\nclass ReceiveRequest:\n def __init__(\n self, *, verb: str = None, path: str = None, streams: List[ContentStream]\n ):\n self.verb = verb\n self.path = path\n self.streams: List[ContentStream] = streams or []\n\n async def read_body_as_str(self) -> str:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n # TODO: maybe raise an error\n return \"\"\n\n # TODO: encoding double check\n stream = await content_stream.stream.read_until_end()\n return bytes(stream).decode(\"utf-8-sig\")\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_request.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport json\nfrom uuid import UUID, uuid4\nfrom typing import List, Union\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ResponseMessageStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass StreamingResponse:\n def __init__(\n self, *, status_code: int = None, streams: List[ResponseMessageStream] = None\n ):\n self.status_code = status_code\n self.streams = streams\n\n def add_stream(self, content: object, identifier: UUID = None):\n if not content:\n raise TypeError(\"content can't be None\")\n\n if self.streams is None:\n self.streams: List[ResponseMessageStream] = []\n\n self.streams.append(\n ResponseMessageStream(id=identifier or uuid4(), content=content)\n )\n\n def set_body(self, body: Union[str, Serializable, Model]):\n # TODO: verify if msrest.serialization.Model is necessary\n if not body:\n return\n\n if isinstance(body, Serializable):\n body = body.to_json()\n elif isinstance(body, Model):\n body = json.dumps(body.as_dict())\n\n self.add_stream(list(body.encode()))\n\n @staticmethod\n def create_response(status_code: int, body: object) -> \"StreamingResponse\":\n response = StreamingResponse(status_code=status_code)\n\n if body:\n response.add_stream(body)\n\n return response\n", "path": "libraries/botframework-streaming/botframework/streaming/streaming_response.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List, Union, Type\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ContentStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass ReceiveResponse:\n def __init__(self, status_code: int = None, streams: List[ContentStream] = None):\n self.status_code = status_code\n self.streams = streams\n\n def read_body_as_json(\n self, cls: Union[Type[Model], Type[Serializable]]\n ) -> Union[Model, Serializable]:\n try:\n body_str = self.read_body_as_str()\n body = None\n\n if issubclass(cls, Serializable):\n body = cls().from_json(body_str)\n elif isinstance(cls, Model):\n body = cls.deserialize(body_str)\n return body\n except Exception as error:\n raise error\n\n def read_body_as_str(self) -> str:\n try:\n content_stream = self.read_body()\n\n if not content_stream:\n return \"\"\n\n # TODO: encoding double check\n return content_stream.decode(\"utf8\")\n except Exception as error:\n raise error\n\n def read_body(self) -> bytes:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n return None\n\n return bytes(content_stream.stream)\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_response.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom botframework.streaming.payloads import ContentStream\n\n\nclass ReceiveRequest:\n def __init__(\n self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None\n ):\n self.verb = verb\n self.path = path\n self.streams: List[ContentStream] = streams or []\n\n async def read_body_as_str(self) -> str:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n # TODO: maybe raise an error\n return \"\"\n\n # TODO: encoding double check\n stream = await content_stream.stream.read_until_end()\n return bytes(stream).decode(\"utf-8-sig\")\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_request.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport json\nfrom http import HTTPStatus\nfrom uuid import UUID, uuid4\nfrom typing import List, Union\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ResponseMessageStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass StreamingResponse:\n def __init__(\n self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None\n ):\n self.status_code = status_code\n self.streams = streams\n\n def add_stream(self, content: object, identifier: UUID = None):\n if not content:\n raise TypeError(\"content can't be None\")\n\n if self.streams is None:\n self.streams: List[ResponseMessageStream] = []\n\n self.streams.append(\n ResponseMessageStream(id=identifier or uuid4(), content=content)\n )\n\n def set_body(self, body: Union[str, Serializable, Model]):\n # TODO: verify if msrest.serialization.Model is necessary\n if not body:\n return\n\n if isinstance(body, Serializable):\n body = body.to_json()\n elif isinstance(body, Model):\n body = json.dumps(body.as_dict())\n\n self.add_stream(list(body.encode()))\n\n @staticmethod\n def create_response(status_code: int, body: object) -> \"StreamingResponse\":\n response = StreamingResponse(status_code=status_code)\n\n if body:\n response.add_stream(body)\n\n return response\n\n @staticmethod\n def not_found(body: object = None) -> \"StreamingResponse\":\n return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)\n\n @staticmethod\n def forbidden(body: object = None) -> \"StreamingResponse\":\n return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)\n\n # pylint: disable=invalid-name\n @staticmethod\n def ok(body: object = None) -> \"StreamingResponse\":\n return StreamingResponse.create_response(HTTPStatus.OK, body)\n\n @staticmethod\n def internal_server_error(body: object = None) -> \"StreamingResponse\":\n return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)\n", "path": "libraries/botframework-streaming/botframework/streaming/streaming_response.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List, Union, Type\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ContentStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass ReceiveResponse:\n def __init__(self, status_code: int = 0, streams: List[ContentStream] = None):\n self.status_code = status_code\n self.streams = streams or []\n\n def read_body_as_json(\n self, cls: Union[Type[Model], Type[Serializable]]\n ) -> Union[Model, Serializable]:\n try:\n body_str = self.read_body_as_str()\n body = None\n\n if issubclass(cls, Serializable):\n body = cls().from_json(body_str)\n elif isinstance(cls, Model):\n body = cls.deserialize(body_str)\n return body\n except Exception as error:\n raise error\n\n def read_body_as_str(self) -> str:\n try:\n content_stream = self.read_body()\n\n if not content_stream:\n return \"\"\n\n # TODO: encoding double check\n return content_stream.decode(\"utf8\")\n except Exception as error:\n raise error\n\n def read_body(self) -> bytes:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n return None\n\n return bytes(content_stream.stream)\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_response.py"}]}
| 1,410 | 735 |
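Besides the default-value fixes, the patch in the record above adds HTTP-status convenience constructors to `StreamingResponse`. A sketch of how a request handler would use them is below; the handler shape is hypothetical, while the import path follows the module layout shown in the record and `read_body_as_str` is the `ReceiveRequest` coroutine defined there.

```python
from botframework.streaming.streaming_response import StreamingResponse

async def process_request(request):
    # 'request' is a botframework.streaming ReceiveRequest.
    if request.path != '/api/messages':
        return StreamingResponse.not_found()
    try:
        body = await request.read_body_as_str()
        # ... dispatch the activity built from 'body' ...
        return StreamingResponse.ok(body)
    except Exception:
        return StreamingResponse.internal_server_error()
```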
gh_patches_debug_14007
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1219
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export broken
### Short description
CSV export fails when the plot name contains characters that the platform's locale codec cannot encode.
### Code to reproduce
```python
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget(show=True, title="Basic plotting examples")
win.resize(1000,600)
win.setWindowTitle('pyqtgraph example: Plotting')
pg.setConfigOptions(antialias=True)
pw = win.addPlot(title="Scatter plot, axis labels, log scale")
pw.addLegend()
pw.plot(np.random.normal(size=100), pen=(255,0,0), name="\u00A0下加热体")
QtGui.QApplication.instance().exec_()
```
### Expected behavior
CSV export succeeds.
### Real behavior
CSV export fails with:
```
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
c:\program files\python37\lib\site-packages\pyqtgraph\exporters\Exporter.py in fileSaveFinished(self, fileName)
75 fileName = fileName + '.' + selectedExt.lstrip('.')
76
---> 77 self.export(fileName=fileName, **self.fileDialog.opts)
78
79 def getScene(self):
c:\program files\python37\lib\site-packages\pyqtgraph\exporters\CSVExporter.py in export(self, fileName)
58
59 with open(fileName, 'w') as fd:
---> 60 fd.write(sep.join(header) + '\n')
61 i = 0
62 numFormat = '%%0.%dg' % self.params['precision']
UnicodeEncodeError: 'gbk' codec can't encode character '\xa0' in position 1: illegal multibyte sequence
```
### Tested environment(s)
* PyQtGraph version: 0.11.0.dev0+g2203933
* Qt Python binding: PyQt5 5.13.2 Qt 5.13.2
* Python version: Python 3.7.5
* NumPy version: 1.17.4
* Operating system: Windows 7 X64
* Installation method: pip git+
### Additional context
I use "\u00A0" because I want to add some space before the label name in the legend.
Could the CSV export write in "utf-8" rather than "gbk"?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/exporters/CSVExporter.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from ..Qt import QtGui, QtCore
3 from .Exporter import Exporter
4 from ..parametertree import Parameter
5 from .. import PlotItem
6
7 __all__ = ['CSVExporter']
8
9
10 class CSVExporter(Exporter):
11 Name = "CSV from plot data"
12 windows = []
13 def __init__(self, item):
14 Exporter.__init__(self, item)
15 self.params = Parameter(name='params', type='group', children=[
16 {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},
17 {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},
18 {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}
19 ])
20
21 def parameters(self):
22 return self.params
23
24 def export(self, fileName=None):
25
26 if not isinstance(self.item, PlotItem):
27 raise Exception("Must have a PlotItem selected for CSV export.")
28
29 if fileName is None:
30 self.fileSaveDialog(filter=["*.csv", "*.tsv"])
31 return
32
33 data = []
34 header = []
35
36 appendAllX = self.params['columnMode'] == '(x,y) per plot'
37
38 for i, c in enumerate(self.item.curves):
39 cd = c.getData()
40 if cd[0] is None:
41 continue
42 data.append(cd)
43 if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:
44 name = c.name().replace('"', '""') + '_'
45 xName, yName = '"'+name+'x"', '"'+name+'y"'
46 else:
47 xName = 'x%04d' % i
48 yName = 'y%04d' % i
49 if appendAllX or i == 0:
50 header.extend([xName, yName])
51 else:
52 header.extend([yName])
53
54 if self.params['separator'] == 'comma':
55 sep = ','
56 else:
57 sep = '\t'
58
59 with open(fileName, 'w') as fd:
60 fd.write(sep.join(header) + '\n')
61 i = 0
62 numFormat = '%%0.%dg' % self.params['precision']
63 numRows = max([len(d[0]) for d in data])
64 for i in range(numRows):
65 for j, d in enumerate(data):
66 # write x value if this is the first column, or if we want
67 # x for all rows
68 if appendAllX or j == 0:
69 if d is not None and i < len(d[0]):
70 fd.write(numFormat % d[0][i] + sep)
71 else:
72 fd.write(' %s' % sep)
73
74 # write y value
75 if d is not None and i < len(d[1]):
76 fd.write(numFormat % d[1][i] + sep)
77 else:
78 fd.write(' %s' % sep)
79 fd.write('\n')
80
81
82 CSVExporter.register()
83
84
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/exporters/CSVExporter.py b/pyqtgraph/exporters/CSVExporter.py
--- a/pyqtgraph/exporters/CSVExporter.py
+++ b/pyqtgraph/exporters/CSVExporter.py
@@ -3,6 +3,7 @@
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
+from ..python2_3 import asUnicode
__all__ = ['CSVExporter']
@@ -57,7 +58,7 @@
sep = '\t'
with open(fileName, 'w') as fd:
- fd.write(sep.join(header) + '\n')
+ fd.write(sep.join(map(asUnicode, header)) + '\n')
i = 0
numFormat = '%%0.%dg' % self.params['precision']
numRows = max([len(d[0]) for d in data])
|
{"golden_diff": "diff --git a/pyqtgraph/exporters/CSVExporter.py b/pyqtgraph/exporters/CSVExporter.py\n--- a/pyqtgraph/exporters/CSVExporter.py\n+++ b/pyqtgraph/exporters/CSVExporter.py\n@@ -3,6 +3,7 @@\n from .Exporter import Exporter\n from ..parametertree import Parameter\n from .. import PlotItem\n+from ..python2_3 import asUnicode\n \n __all__ = ['CSVExporter']\n \n@@ -57,7 +58,7 @@\n sep = '\\t'\n \n with open(fileName, 'w') as fd:\n- fd.write(sep.join(header) + '\\n')\n+ fd.write(sep.join(map(asUnicode, header)) + '\\n')\n i = 0\n numFormat = '%%0.%dg' % self.params['precision']\n numRows = max([len(d[0]) for d in data])\n", "issue": "CSV export broken\n### Short description\r\nExport CSV failed when the plot name has decode error characters.\r\n\r\n### Code to reproduce\r\n```python\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nimport numpy as np\r\nimport pyqtgraph as pg\r\n\r\n#QtGui.QApplication.setGraphicsSystem('raster')\r\napp = QtGui.QApplication([])\r\nwin = pg.GraphicsLayoutWidget(show=True, title=\"Basic plotting examples\")\r\nwin.resize(1000,600)\r\nwin.setWindowTitle('pyqtgraph example: Plotting')\r\n\r\n\r\npg.setConfigOptions(antialias=True)\r\n\r\npw = win.addPlot(title=\"Scatter plot, axis labels, log scale\")\r\npw.addLegend()\r\npw .plot(np.random.normal(size=100), pen=(255,0,0), name=\"\\u00A0\u4e0b\u52a0\u70ed\u4f53\")\r\n\r\nQtGui.QApplication.instance().exec_()\r\n```\r\n\r\n### Expected behavior\r\nExport CSV Success\r\n\r\n### Real behavior\r\nExport CSV Failed\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nUnicodeEncodeError Traceback (most recent call last)\r\nc:\\program files\\python37\\lib\\site-packages\\pyqtgraph\\exporters\\Exporter.py in fileSaveFinished(self, fileName)\r\n 75 fileName = fileName + '.' + selectedExt.lstrip('.')\r\n 76\r\n---> 77 self.export(fileName=fileName, **self.fileDialog.opts)\r\n 78\r\n 79 def getScene(self):\r\n\r\nc:\\program files\\python37\\lib\\site-packages\\pyqtgraph\\exporters\\CSVExporter.py in export(self, fileName)\r\n 58\r\n 59 with open(fileName, 'w') as fd:\r\n---> 60 fd.write(sep.join(header) + '\\n')\r\n 61 i = 0\r\n 62 numFormat = '%%0.%dg' % self.params['precision']\r\n\r\nUnicodeEncodeError: 'gbk' codec can't encode character '\\xa0' in position 1: illegal multibyte sequence\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11.0.dev0+g2203933\r\n * Qt Python binding: PyQt5 5.13.2 Qt 5.13.2\r\n * Python version: Python 3.7.5 \r\n * NumPy version: 1.17.4\r\n * Operating system: Windows 7 X64\r\n * Installation method: pip git+\r\n\r\n### Additional context\r\nI use \"\\u00A0\" because i want to add some space before label name in the legend.\r\nCould i use the csv export by \"utf-8\" but not \"gbk\" ?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. 
import PlotItem\n\n__all__ = ['CSVExporter']\n \n \nclass CSVExporter(Exporter):\n Name = \"CSV from plot data\"\n windows = []\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},\n {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for CSV export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.csv\", \"*.tsv\"])\n return\n\n data = []\n header = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n\n for i, c in enumerate(self.item.curves):\n cd = c.getData()\n if cd[0] is None:\n continue\n data.append(cd)\n if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:\n name = c.name().replace('\"', '\"\"') + '_'\n xName, yName = '\"'+name+'x\"', '\"'+name+'y\"'\n else:\n xName = 'x%04d' % i\n yName = 'y%04d' % i\n if appendAllX or i == 0:\n header.extend([xName, yName])\n else:\n header.extend([yName])\n\n if self.params['separator'] == 'comma':\n sep = ','\n else:\n sep = '\\t'\n\n with open(fileName, 'w') as fd:\n fd.write(sep.join(header) + '\\n')\n i = 0\n numFormat = '%%0.%dg' % self.params['precision']\n numRows = max([len(d[0]) for d in data])\n for i in range(numRows):\n for j, d in enumerate(data):\n # write x value if this is the first column, or if we want\n # x for all rows\n if appendAllX or j == 0:\n if d is not None and i < len(d[0]):\n fd.write(numFormat % d[0][i] + sep)\n else:\n fd.write(' %s' % sep)\n\n # write y value\n if d is not None and i < len(d[1]):\n fd.write(numFormat % d[1][i] + sep)\n else:\n fd.write(' %s' % sep)\n fd.write('\\n')\n\n\nCSVExporter.register() \n \n \n", "path": "pyqtgraph/exporters/CSVExporter.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. 
import PlotItem\nfrom ..python2_3 import asUnicode\n\n__all__ = ['CSVExporter']\n \n \nclass CSVExporter(Exporter):\n Name = \"CSV from plot data\"\n windows = []\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},\n {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for CSV export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.csv\", \"*.tsv\"])\n return\n\n data = []\n header = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n\n for i, c in enumerate(self.item.curves):\n cd = c.getData()\n if cd[0] is None:\n continue\n data.append(cd)\n if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:\n name = c.name().replace('\"', '\"\"') + '_'\n xName, yName = '\"'+name+'x\"', '\"'+name+'y\"'\n else:\n xName = 'x%04d' % i\n yName = 'y%04d' % i\n if appendAllX or i == 0:\n header.extend([xName, yName])\n else:\n header.extend([yName])\n\n if self.params['separator'] == 'comma':\n sep = ','\n else:\n sep = '\\t'\n\n with open(fileName, 'w') as fd:\n fd.write(sep.join(map(asUnicode, header)) + '\\n')\n i = 0\n numFormat = '%%0.%dg' % self.params['precision']\n numRows = max([len(d[0]) for d in data])\n for i in range(numRows):\n for j, d in enumerate(data):\n # write x value if this is the first column, or if we want\n # x for all rows\n if appendAllX or j == 0:\n if d is not None and i < len(d[0]):\n fd.write(numFormat % d[0][i] + sep)\n else:\n fd.write(' %s' % sep)\n\n # write y value\n if d is not None and i < len(d[1]):\n fd.write(numFormat % d[1][i] + sep)\n else:\n fd.write(' %s' % sep)\n fd.write('\\n')\n\n\nCSVExporter.register() \n \n \n", "path": "pyqtgraph/exporters/CSVExporter.py"}]}
| 1,690 | 197 |
gh_patches_debug_36559
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2930
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add API endpoint for event slugs
### Is your feature request related to a problem? Please describe.
For the app we want to get events based on their slug, this is currently not possible.
### Describe the solution you'd like
Add an API endpoint for event slugs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/v2/urls.py`
Content:
```
1 """Events app API v2 urls."""
2 from django.urls import path
3
4 from events.api.v2.views import (
5 EventDetailView,
6 EventListView,
7 EventRegistrationDetailView,
8 EventRegistrationFieldsView,
9 EventRegistrationsView,
10 ExternalEventDetailView,
11 ExternalEventListView,
12 MarkPresentAPIView,
13 )
14
15 app_name = "events"
16
17 urlpatterns = [
18 path("events/", EventListView.as_view(), name="events-list"),
19 path(
20 "events/<int:pk>/",
21 EventDetailView.as_view(),
22 name="event-detail",
23 ),
24 path(
25 "events/<int:pk>/registrations/",
26 EventRegistrationsView.as_view(),
27 name="event-registrations",
28 ),
29 path(
30 "events/<int:event_id>/registrations/<int:pk>/",
31 EventRegistrationDetailView.as_view(),
32 name="event-registration-detail",
33 ),
34 path(
35 "events/<int:event_id>/registrations/<int:registration_id>/fields/",
36 EventRegistrationFieldsView.as_view(),
37 name="event-registration-fields",
38 ),
39 path(
40 "events/<int:pk>/mark-present/<uuid:token>/",
41 MarkPresentAPIView.as_view(),
42 name="mark-present",
43 ),
44 path(
45 "events/external/", ExternalEventListView.as_view(), name="external-events-list"
46 ),
47 path(
48 "events/external/<int:pk>/",
49 ExternalEventDetailView.as_view(),
50 name="external-event-detail",
51 ),
52 ]
53
```
Path: `website/events/api/v2/serializers/event.py`
Content:
```
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
4 from documents.api.v2.serializers.document import DocumentSerializer
5 from events import services
6 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
7 from events.models import Event
8 from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
9 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
10 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
11 CleanedModelSerializer,
12 )
13 from utils.snippets import create_google_maps_url
14
15
16 class EventSerializer(CleanedModelSerializer):
17 """Serializer for events."""
18
19 class Meta:
20 model = Event
21 fields = (
22 "pk",
23 "title",
24 "description",
25 "caption",
26 "start",
27 "end",
28 "category",
29 "registration_start",
30 "registration_end",
31 "cancel_deadline",
32 "optional_registrations",
33 "location",
34 "price",
35 "fine",
36 "num_participants",
37 "max_participants",
38 "no_registration_message",
39 "registration_status",
40 "cancel_too_late_message",
41 "has_fields",
42 "food_event",
43 "maps_url",
44 "user_permissions",
45 "user_registration",
46 "organisers",
47 "documents",
48 )
49
50 description = CleanedHTMLSerializer()
51 organisers = MemberGroupSerializer(many=True)
52 user_registration = serializers.SerializerMethodField("_user_registration")
53 num_participants = serializers.SerializerMethodField("_num_participants")
54 maps_url = serializers.SerializerMethodField("_maps_url")
55 registration_status = serializers.SerializerMethodField("_registration_status")
56 price = PaymentAmountSerializer()
57 fine = PaymentAmountSerializer()
58 documents = DocumentSerializer(many=True)
59 user_permissions = serializers.SerializerMethodField("_user_permissions")
60
61 def _user_registration(self, instance: Event):
62 if self.context["request"].member and len(instance.member_registration) > 0:
63 registration = instance.member_registration[-1]
64 return EventRegistrationSerializer(
65 registration,
66 context=self.context,
67 fields=(
68 "pk",
69 "present",
70 "queue_position",
71 "is_cancelled",
72 "is_late_cancellation",
73 "date",
74 "payment",
75 ),
76 ).data
77 return None
78
79 def _registration_status(self, instance: Event):
80 if self.context["request"].member and len(instance.member_registration) > 0:
81 registration = instance.member_registration[-1]
82 else:
83 registration = None
84 status = services.registration_status(
85 instance, registration, self.context["request"].member
86 )
87 cancel_status = services.cancel_status(instance, registration)
88
89 status_str = services.registration_status_string(status, instance, registration)
90 cancel_str = services.cancel_info_string(instance, cancel_status, status)
91 if services.show_cancel_status(status) and cancel_str != "":
92 return f"{status_str} {cancel_str}"
93 return f"{status_str}"
94
95 def _num_participants(self, instance: Event):
96 if instance.max_participants:
97 return min(instance.participant_count, instance.max_participants)
98 return instance.participant_count
99
100 def _user_permissions(self, instance):
101 member = self.context["request"].member
102 return services.event_permissions(member, instance, registration_prefetch=True)
103
104 def _maps_url(self, instance):
105 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -1,4 +1,5 @@
from rest_framework import serializers
+from rest_framework.reverse import reverse
from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
from documents.api.v2.serializers.document import DocumentSerializer
@@ -20,6 +21,8 @@
model = Event
fields = (
"pk",
+ "slug",
+ "url",
"title",
"description",
"caption",
@@ -57,6 +60,7 @@
fine = PaymentAmountSerializer()
documents = DocumentSerializer(many=True)
user_permissions = serializers.SerializerMethodField("_user_permissions")
+ url = serializers.SerializerMethodField("_url")
def _user_registration(self, instance: Event):
if self.context["request"].member and len(instance.member_registration) > 0:
@@ -101,5 +105,18 @@
member = self.context["request"].member
return services.event_permissions(member, instance, registration_prefetch=True)
+ def _url(self, instance: Event):
+ if instance.slug is None:
+ return reverse(
+ "events:event",
+ kwargs={"pk": instance.pk},
+ request=self.context["request"],
+ )
+ return reverse(
+ "events:event",
+ kwargs={"slug": instance.slug},
+ request=self.context["request"],
+ )
+
def _maps_url(self, instance):
return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
diff --git a/website/events/api/v2/urls.py b/website/events/api/v2/urls.py
--- a/website/events/api/v2/urls.py
+++ b/website/events/api/v2/urls.py
@@ -21,6 +21,11 @@
EventDetailView.as_view(),
name="event-detail",
),
+ path(
+ "events/<slug:slug>/",
+ EventDetailView.as_view(lookup_field="slug"),
+ name="event-detail",
+ ),
path(
"events/<int:pk>/registrations/",
EventRegistrationsView.as_view(),
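As a usage illustration (hypothetical host and slug, not part of the patch), the same event is now reachable by primary key or by slug:

```python
# Hedged sketch using the `requests` client; endpoint values are made up.
import requests

BASE = "https://example.com/api/v2"
by_pk = requests.get(f"{BASE}/events/42/")              # existing integer lookup
by_slug = requests.get(f"{BASE}/events/lustrum-gala/")  # new slug lookup
# The serializer's new `url` field reports the canonical link for the event.
```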
|
{"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -1,4 +1,5 @@\n from rest_framework import serializers\n+from rest_framework.reverse import reverse\n \n from activemembers.api.v2.serializers.member_group import MemberGroupSerializer\n from documents.api.v2.serializers.document import DocumentSerializer\n@@ -20,6 +21,8 @@\n model = Event\n fields = (\n \"pk\",\n+ \"slug\",\n+ \"url\",\n \"title\",\n \"description\",\n \"caption\",\n@@ -57,6 +60,7 @@\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n+ url = serializers.SerializerMethodField(\"_url\")\n \n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n@@ -101,5 +105,18 @@\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n \n+ def _url(self, instance: Event):\n+ if instance.slug is None:\n+ return reverse(\n+ \"events:event\",\n+ kwargs={\"pk\": instance.pk},\n+ request=self.context[\"request\"],\n+ )\n+ return reverse(\n+ \"events:event\",\n+ kwargs={\"slug\": instance.slug},\n+ request=self.context[\"request\"],\n+ )\n+\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\ndiff --git a/website/events/api/v2/urls.py b/website/events/api/v2/urls.py\n--- a/website/events/api/v2/urls.py\n+++ b/website/events/api/v2/urls.py\n@@ -21,6 +21,11 @@\n EventDetailView.as_view(),\n name=\"event-detail\",\n ),\n+ path(\n+ \"events/<slug:slug>/\",\n+ EventDetailView.as_view(lookup_field=\"slug\"),\n+ name=\"event-detail\",\n+ ),\n path(\n \"events/<int:pk>/registrations/\",\n EventRegistrationsView.as_view(),\n", "issue": "Add API endpoint for event slugs\n### Is your feature request related to a problem? 
Please describe.\r\nFor the app we want to get events based on their slug, this is currently not possible.\r\n\r\n### Describe the solution you'd like\r\nAdd an API endpoint for event slugs.\r\n\n", "before_files": [{"content": "\"\"\"Events app API v2 urls.\"\"\"\nfrom django.urls import path\n\nfrom events.api.v2.views import (\n EventDetailView,\n EventListView,\n EventRegistrationDetailView,\n EventRegistrationFieldsView,\n EventRegistrationsView,\n ExternalEventDetailView,\n ExternalEventListView,\n MarkPresentAPIView,\n)\n\napp_name = \"events\"\n\nurlpatterns = [\n path(\"events/\", EventListView.as_view(), name=\"events-list\"),\n path(\n \"events/<int:pk>/\",\n EventDetailView.as_view(),\n name=\"event-detail\",\n ),\n path(\n \"events/<int:pk>/registrations/\",\n EventRegistrationsView.as_view(),\n name=\"event-registrations\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:pk>/\",\n EventRegistrationDetailView.as_view(),\n name=\"event-registration-detail\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:registration_id>/fields/\",\n EventRegistrationFieldsView.as_view(),\n name=\"event-registration-fields\",\n ),\n path(\n \"events/<int:pk>/mark-present/<uuid:token>/\",\n MarkPresentAPIView.as_view(),\n name=\"mark-present\",\n ),\n path(\n \"events/external/\", ExternalEventListView.as_view(), name=\"external-events-list\"\n ),\n path(\n \"events/external/<int:pk>/\",\n ExternalEventDetailView.as_view(),\n name=\"external-event-detail\",\n ),\n]\n", "path": "website/events/api/v2/urls.py"}, {"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"registration_status\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n registration_status = serializers.SerializerMethodField(\"_registration_status\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and 
len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n return EventRegistrationSerializer(\n registration,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n return None\n\n def _registration_status(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n else:\n registration = None\n status = services.registration_status(\n instance, registration, self.context[\"request\"].member\n )\n cancel_status = services.cancel_status(instance, registration)\n\n status_str = services.registration_status_string(status, instance, registration)\n cancel_str = services.cancel_info_string(instance, cancel_status, status)\n if services.show_cancel_status(status) and cancel_str != \"\":\n return f\"{status_str} {cancel_str}\"\n return f\"{status_str}\"\n\n def _num_participants(self, instance: Event):\n if instance.max_participants:\n return min(instance.participant_count, instance.max_participants)\n return instance.participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}], "after_files": [{"content": "\"\"\"Events app API v2 urls.\"\"\"\nfrom django.urls import path\n\nfrom events.api.v2.views import (\n EventDetailView,\n EventListView,\n EventRegistrationDetailView,\n EventRegistrationFieldsView,\n EventRegistrationsView,\n ExternalEventDetailView,\n ExternalEventListView,\n MarkPresentAPIView,\n)\n\napp_name = \"events\"\n\nurlpatterns = [\n path(\"events/\", EventListView.as_view(), name=\"events-list\"),\n path(\n \"events/<int:pk>/\",\n EventDetailView.as_view(),\n name=\"event-detail\",\n ),\n path(\n \"events/<slug:slug>/\",\n EventDetailView.as_view(lookup_field=\"slug\"),\n name=\"event-detail\",\n ),\n path(\n \"events/<int:pk>/registrations/\",\n EventRegistrationsView.as_view(),\n name=\"event-registrations\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:pk>/\",\n EventRegistrationDetailView.as_view(),\n name=\"event-registration-detail\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:registration_id>/fields/\",\n EventRegistrationFieldsView.as_view(),\n name=\"event-registration-fields\",\n ),\n path(\n \"events/<int:pk>/mark-present/<uuid:token>/\",\n MarkPresentAPIView.as_view(),\n name=\"mark-present\",\n ),\n path(\n \"events/external/\", ExternalEventListView.as_view(), name=\"external-events-list\"\n ),\n path(\n \"events/external/<int:pk>/\",\n ExternalEventDetailView.as_view(),\n name=\"external-event-detail\",\n ),\n]\n", "path": "website/events/api/v2/urls.py"}, {"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom 
thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"slug\",\n \"url\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"registration_status\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n registration_status = serializers.SerializerMethodField(\"_registration_status\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n url = serializers.SerializerMethodField(\"_url\")\n\n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n return EventRegistrationSerializer(\n registration,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n return None\n\n def _registration_status(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n else:\n registration = None\n status = services.registration_status(\n instance, registration, self.context[\"request\"].member\n )\n cancel_status = services.cancel_status(instance, registration)\n\n status_str = services.registration_status_string(status, instance, registration)\n cancel_str = services.cancel_info_string(instance, cancel_status, status)\n if services.show_cancel_status(status) and cancel_str != \"\":\n return f\"{status_str} {cancel_str}\"\n return f\"{status_str}\"\n\n def _num_participants(self, instance: Event):\n if instance.max_participants:\n return min(instance.participant_count, instance.max_participants)\n return instance.participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n\n def _url(self, instance: Event):\n if instance.slug is None:\n return reverse(\n \"events:event\",\n kwargs={\"pk\": instance.pk},\n request=self.context[\"request\"],\n )\n return reverse(\n \"events:event\",\n kwargs={\"slug\": instance.slug},\n request=self.context[\"request\"],\n )\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]}
| 1,691 | 524 |
gh_patches_debug_33090
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-947
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Polygon setEdges does not update the ShapeStim vertices
If I make a polygon object:
``` python
poly = visual.Polygon(win, edges=3, lineWidth=3, radius=3)
poly.draw()
win.flip()
```
and then want to change the shape on the fly in code, I would have thought I would do:
``` python
poly.setEdges(5)
poly.draw()
win.flip()
```
This doesn't actually change the shape that gets shown though, but the following code does:
``` python
poly.setEdges(5)
poly.setVertices(poly.vertices)
poly.draw()
win.flip()
```
I think this is because `poly.setEdges` calls `poly._calcVertices` which sets the `poly.vertices` attribute, but `poly.setEdges` doesn't pass the new array to the `poly.setVertices` method, which I gather is inherited from `ShapeStim`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/visual/polygon.py`
Content:
```
1
2 #!/usr/bin/env python2
3
4 '''Creates a regular polygon (triangles, pentagrams, ...)
5 as a special case of a :class:`~psychopy.visual.ShapeStim`'''
6
7 # Part of the PsychoPy library
8 # Copyright (C) 2015 Jonathan Peirce
9 # Distributed under the terms of the GNU General Public License (GPL).
10
11 import psychopy # so we can get the __path__
12
13 from psychopy.visual.shape import ShapeStim
14 from psychopy.tools.attributetools import attributeSetter, setAttribute
15
16 import numpy
17
18
19 class Polygon(ShapeStim):
20 """Creates a regular polygon (triangles, pentagrams, ...) as a special case of a :class:`~psychopy.visual.ShapeStim`
21
22 (New in version 1.72.00)
23 """
24 def __init__(self, win, edges=3, radius=.5, **kwargs):
25 """
26 Polygon accepts all input parameters that :class:`~psychopy.visual.ShapeStim` accepts, except for vertices and closeShape.
27 """
28 #what local vars are defined (these are the init params) for use by __repr__
29 self._initParams = dir()
30 self._initParams.remove('self')
31 #kwargs isn't a parameter, but a list of params
32 self._initParams.remove('kwargs')
33 self._initParams.extend(kwargs)
34 self.autoLog = False #but will be changed if needed at end of init
35 self.__dict__['edges'] = edges
36 self.radius = numpy.asarray(radius)
37 self._calcVertices()
38 kwargs['closeShape'] = True # Make sure nobody messes around here
39 kwargs['vertices'] = self.vertices
40 super(Polygon, self).__init__(win, **kwargs)
41
42 def _calcVertices(self):
43 d = numpy.pi*2/ self.edges
44 self.vertices = numpy.asarray([
45 numpy.asarray(
46 (numpy.sin(e*d), numpy.cos(e*d))
47 ) * self.radius
48 for e in xrange(int(round(self.edges)))
49 ])
50
51 @attributeSetter
52 def edges(self, edges):
53 """Int or float. Number of edges of the polygon. Floats are rounded to int.
54 :ref:`Operations <attrib-operations>` supported."""
55 self.__dict__['edges'] = edges
56 self._calcVertices()
57 def setEdges(self, edges, operation='', log=None):
58 """Usually you can use 'stim.attribute = value' syntax instead,
59 but use this method if you need to suppress the log message"""
60 setAttribute(self, 'edges', edges, log, operation)
61
62 @attributeSetter
63 def radius(self, radius):
64 """float, int, tuple, list or 2x1 array
65 Radius of the Polygon (distance from the center to the corners).
66 May be a -2tuple or list to stretch the polygon asymmetrically.
67
68 :ref:`Operations <attrib-operations>` supported.
69
70 Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging."""
71 self.__dict__['radius'] = numpy.array(radius)
72 self._calcVertices()
73 self.setVertices(self.vertices, log=False)
74 def setRadius(self, radius, operation='', log=None):
75 """Usually you can use 'stim.attribute = value' syntax instead,
76 but use this method if you need to suppress the log message"""
77 setAttribute(self, 'radius', radius, log, operation)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/psychopy/visual/polygon.py b/psychopy/visual/polygon.py
--- a/psychopy/visual/polygon.py
+++ b/psychopy/visual/polygon.py
@@ -47,13 +47,14 @@
) * self.radius
for e in xrange(int(round(self.edges)))
])
-
+
@attributeSetter
def edges(self, edges):
"""Int or float. Number of edges of the polygon. Floats are rounded to int.
:ref:`Operations <attrib-operations>` supported."""
self.__dict__['edges'] = edges
self._calcVertices()
+ self.setVertices(self.vertices, log=False)
def setEdges(self, edges, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message"""
@@ -66,7 +67,7 @@
May be a -2tuple or list to stretch the polygon asymmetrically.
:ref:`Operations <attrib-operations>` supported.
-
+
Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging."""
self.__dict__['radius'] = numpy.array(radius)
self._calcVertices()
@@ -74,4 +75,4 @@
def setRadius(self, radius, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message"""
- setAttribute(self, 'radius', radius, log, operation)
\ No newline at end of file
+ setAttribute(self, 'radius', radius, log, operation)
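With this fix applied, the reporter's expected usage works without the manual `setVertices` call, since assigning `edges` now propagates the recomputed vertices to `ShapeStim`. A sketch, assuming a display is available:

```python
from psychopy import visual

win = visual.Window()  # assumed display context
poly = visual.Polygon(win, edges=3, lineWidth=3, radius=3)
poly.setEdges(5)  # now recomputes the vertex array and pushes it to ShapeStim
poly.draw()
win.flip()
```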
|
{"golden_diff": "diff --git a/psychopy/visual/polygon.py b/psychopy/visual/polygon.py\n--- a/psychopy/visual/polygon.py\n+++ b/psychopy/visual/polygon.py\n@@ -47,13 +47,14 @@\n ) * self.radius\n for e in xrange(int(round(self.edges)))\n ])\n- \n+\n @attributeSetter\n def edges(self, edges):\n \"\"\"Int or float. Number of edges of the polygon. Floats are rounded to int.\n :ref:`Operations <attrib-operations>` supported.\"\"\"\n self.__dict__['edges'] = edges\n self._calcVertices()\n+ self.setVertices(self.vertices, log=False)\n def setEdges(self, edges, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n@@ -66,7 +67,7 @@\n May be a -2tuple or list to stretch the polygon asymmetrically.\n \n :ref:`Operations <attrib-operations>` supported.\n- \n+\n Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging.\"\"\"\n self.__dict__['radius'] = numpy.array(radius)\n self._calcVertices()\n@@ -74,4 +75,4 @@\n def setRadius(self, radius, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n- setAttribute(self, 'radius', radius, log, operation)\n\\ No newline at end of file\n+ setAttribute(self, 'radius', radius, log, operation)\n", "issue": "Polygon setEdges does not update the ShapeStim vertices\nIf I make a polygon object:\n\n``` python\npoly = visual.Polygon(win, edges=3, lineWidth=3, radius=3)\npoly.draw()\nwin.flip()\n```\n\nand then want to change the shape on the fly in code, I would have though I would do:\n\n``` python\npoly.setEdges(5)\npoly.draw()\nwin.flip()\n```\n\nThis doesn't actually change the shape that gets shown though, but the following code does:\n\n``` python\npoly.setEdges(5)\npoly.setVertices(poly.vertices)\npoly.draw()\nwin.flip()\n```\n\nI think this is because `poly.setEdges` calls `poly._calcVertices` which sets the `poly.vertices` attribute, but `poly.setEdges` doesn't pass the new array to the `poly.setVertices` method, which I gather is inherited from `ShapeStim`.\n\n", "before_files": [{"content": "\n#!/usr/bin/env python2\n\n'''Creates a regular polygon (triangles, pentagrams, ...)\nas a special case of a :class:`~psychopy.visual.ShapeStim`'''\n\n# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport psychopy # so we can get the __path__\n\nfrom psychopy.visual.shape import ShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\nimport numpy\n\n\nclass Polygon(ShapeStim):\n \"\"\"Creates a regular polygon (triangles, pentagrams, ...) 
as a special case of a :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n \"\"\"\n def __init__(self, win, edges=3, radius=.5, **kwargs):\n \"\"\"\n Polygon accepts all input parameters that :class:`~psychopy.visual.ShapeStim` accepts, except for vertices and closeShape.\n \"\"\"\n #what local vars are defined (these are the init params) for use by __repr__\n self._initParams = dir()\n self._initParams.remove('self')\n #kwargs isn't a parameter, but a list of params\n self._initParams.remove('kwargs')\n self._initParams.extend(kwargs)\n self.autoLog = False #but will be changed if needed at end of init\n self.__dict__['edges'] = edges\n self.radius = numpy.asarray(radius)\n self._calcVertices()\n kwargs['closeShape'] = True # Make sure nobody messes around here\n kwargs['vertices'] = self.vertices\n super(Polygon, self).__init__(win, **kwargs)\n\n def _calcVertices(self):\n d = numpy.pi*2/ self.edges\n self.vertices = numpy.asarray([\n numpy.asarray(\n (numpy.sin(e*d), numpy.cos(e*d))\n ) * self.radius\n for e in xrange(int(round(self.edges)))\n ])\n \n @attributeSetter\n def edges(self, edges):\n \"\"\"Int or float. Number of edges of the polygon. Floats are rounded to int.\n :ref:`Operations <attrib-operations>` supported.\"\"\"\n self.__dict__['edges'] = edges\n self._calcVertices()\n def setEdges(self, edges, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'edges', edges, log, operation)\n\n @attributeSetter\n def radius(self, radius):\n \"\"\"float, int, tuple, list or 2x1 array\n Radius of the Polygon (distance from the center to the corners).\n May be a -2tuple or list to stretch the polygon asymmetrically.\n\n :ref:`Operations <attrib-operations>` supported.\n \n Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging.\"\"\"\n self.__dict__['radius'] = numpy.array(radius)\n self._calcVertices()\n self.setVertices(self.vertices, log=False)\n def setRadius(self, radius, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'radius', radius, log, operation)", "path": "psychopy/visual/polygon.py"}], "after_files": [{"content": "\n#!/usr/bin/env python2\n\n'''Creates a regular polygon (triangles, pentagrams, ...)\nas a special case of a :class:`~psychopy.visual.ShapeStim`'''\n\n# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport psychopy # so we can get the __path__\n\nfrom psychopy.visual.shape import ShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\nimport numpy\n\n\nclass Polygon(ShapeStim):\n \"\"\"Creates a regular polygon (triangles, pentagrams, ...) 
as a special case of a :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n \"\"\"\n def __init__(self, win, edges=3, radius=.5, **kwargs):\n \"\"\"\n Polygon accepts all input parameters that :class:`~psychopy.visual.ShapeStim` accepts, except for vertices and closeShape.\n \"\"\"\n #what local vars are defined (these are the init params) for use by __repr__\n self._initParams = dir()\n self._initParams.remove('self')\n #kwargs isn't a parameter, but a list of params\n self._initParams.remove('kwargs')\n self._initParams.extend(kwargs)\n self.autoLog = False #but will be changed if needed at end of init\n self.__dict__['edges'] = edges\n self.radius = numpy.asarray(radius)\n self._calcVertices()\n kwargs['closeShape'] = True # Make sure nobody messes around here\n kwargs['vertices'] = self.vertices\n super(Polygon, self).__init__(win, **kwargs)\n\n def _calcVertices(self):\n d = numpy.pi*2/ self.edges\n self.vertices = numpy.asarray([\n numpy.asarray(\n (numpy.sin(e*d), numpy.cos(e*d))\n ) * self.radius\n for e in xrange(int(round(self.edges)))\n ])\n\n @attributeSetter\n def edges(self, edges):\n \"\"\"Int or float. Number of edges of the polygon. Floats are rounded to int.\n :ref:`Operations <attrib-operations>` supported.\"\"\"\n self.__dict__['edges'] = edges\n self._calcVertices()\n self.setVertices(self.vertices, log=False)\n def setEdges(self, edges, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'edges', edges, log, operation)\n\n @attributeSetter\n def radius(self, radius):\n \"\"\"float, int, tuple, list or 2x1 array\n Radius of the Polygon (distance from the center to the corners).\n May be a -2tuple or list to stretch the polygon asymmetrically.\n\n :ref:`Operations <attrib-operations>` supported.\n\n Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging.\"\"\"\n self.__dict__['radius'] = numpy.array(radius)\n self._calcVertices()\n self.setVertices(self.vertices, log=False)\n def setRadius(self, radius, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'radius', radius, log, operation)\n", "path": "psychopy/visual/polygon.py"}]}
| 1,333 | 374 |
gh_patches_debug_50470
|
rasdani/github-patches
|
git_diff
|
cython__cython-4942
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Annotated attributes of cclass are not supporting pointers
<!--
**PLEASE READ THIS FIRST:**
- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.
- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.
- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.
-->
**Describe the bug**
The compilation fails when an attribute of a cclass is declared using an annotated type containing a pointer.
**To Reproduce**
Following code:
```python
import cython
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
```
fails during compilation with error:
```
$ cython -3 test.py
Error compiling Cython file:
------------------------------------------------------------
...
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
^
------------------------------------------------------------
test.py:8:23: Cannot convert 'void *' to Python object
```
**Expected behavior**
Compilation should be successful.
**Environment (please complete the following information):**
- OS: Linux
- Python version: Python 3.9.2
- Cython version: master
**Additional context**
When a `declare()` statement or the `cython.p_int` type is used, compilation is successful:
```python
import cython
@cython.cclass
class Foo:
a = cython.declare(cython.pointer(cython.int))
def bar(self):
self.a = cython.NULL
```
```python
import cython
@cython.cclass
class Foo:
a: cython.p_int
def bar(self):
self.a = cython.NULL
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/tutorial/clibraries/queue.py`
Content:
```
1 from cython.cimports import cqueue
2
3 @cython.cclass
4 class Queue:
5 _c_queue = cython.declare(cython.pointer(cqueue.Queue))
6
7 def __cinit__(self):
8 self._c_queue = cqueue.queue_new()
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py
--- a/docs/examples/tutorial/clibraries/queue.py
+++ b/docs/examples/tutorial/clibraries/queue.py
@@ -2,7 +2,7 @@
@cython.cclass
class Queue:
- _c_queue = cython.declare(cython.pointer(cqueue.Queue))
+ _c_queue: cython.pointer(cqueue.Queue)
def __cinit__(self):
self._c_queue = cqueue.queue_new()
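The docs example above switches from `cython.declare` to the annotation style from the report. For reference, the same pattern in a standalone module, which is essentially the issue's reproducer and is expected to compile once annotated pointer attributes work:

```python
import cython

@cython.cclass
class Foo:
    a: cython.pointer(cython.int)  # annotated pointer attribute

    def bar(self):
        self.a = cython.NULL
```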
|
{"golden_diff": "diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py\n--- a/docs/examples/tutorial/clibraries/queue.py\n+++ b/docs/examples/tutorial/clibraries/queue.py\n@@ -2,7 +2,7 @@\n \n @cython.cclass\n class Queue:\n- _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n+ _c_queue: cython.pointer(cqueue.Queue)\n \n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "issue": "[BUG] Annotated attributes of cclass are not supporting pointers\n<!--\r\n**PLEASE READ THIS FIRST:**\r\n- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.\r\n- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.\r\n- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.\r\n-->\r\n\r\n**Describe the bug**\r\nThe compilation is failing, when attribute of cclass is declared using annotated type containing pointer.\r\n\r\n**To Reproduce**\r\nFollowing code:\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\nfails during compilation with error:\r\n```\r\n$ cython -3 test.py\r\n\r\nError compiling Cython file:\r\n------------------------------------------------------------\r\n...\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n ^\r\n------------------------------------------------------------\r\n\r\ntest.py:8:23: Cannot convert 'void *' to Python object\r\n```\r\n\r\n**Expected behavior**\r\nCompilation should be successfull.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux\r\n - Python version: Python 3.9.2\r\n - Cython version: master\r\n\r\n**Additional context**\r\nWhen `declare()` statement or `cython.p_int` type is used, compilation is successful:\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a = cython.declare(cython.pointer(cython.int))\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.p_int\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\n", "before_files": [{"content": "from cython.cimports import cqueue\n\[email protected]\nclass Queue:\n _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n\n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "path": "docs/examples/tutorial/clibraries/queue.py"}], "after_files": [{"content": "from cython.cimports import cqueue\n\[email protected]\nclass Queue:\n _c_queue: cython.pointer(cqueue.Queue)\n\n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "path": "docs/examples/tutorial/clibraries/queue.py"}]}
| 761 | 114 |
gh_patches_debug_6285
|
rasdani/github-patches
|
git_diff
|
encode__httpx-1503
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CertTypes `keyfile` and `password` should be Optional types.
`SSLContext.load_cert_chain` can take `None` as argument values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)), so I guess this:
https://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54
should be rewritten as follows:
```python
CertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_types.py`
Content:
```
1 """
2 Type definitions for type checking purposes.
3 """
4
5 import ssl
6 from http.cookiejar import CookieJar
7 from typing import (
8 IO,
9 TYPE_CHECKING,
10 AsyncIterable,
11 Callable,
12 Dict,
13 Iterable,
14 List,
15 Mapping,
16 Optional,
17 Sequence,
18 Tuple,
19 Union,
20 )
21
22 if TYPE_CHECKING: # pragma: no cover
23 from ._auth import Auth # noqa: F401
24 from ._config import Proxy, Timeout # noqa: F401
25 from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401
26
27
28 PrimitiveData = Optional[Union[str, int, float, bool]]
29
30 RawURL = Tuple[bytes, bytes, Optional[int], bytes]
31
32 URLTypes = Union["URL", str]
33
34 QueryParamTypes = Union[
35 "QueryParams",
36 Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],
37 List[Tuple[str, PrimitiveData]],
38 Tuple[Tuple[str, PrimitiveData], ...],
39 str,
40 bytes,
41 None,
42 ]
43
44 HeaderTypes = Union[
45 "Headers",
46 Dict[str, str],
47 Dict[bytes, bytes],
48 Sequence[Tuple[str, str]],
49 Sequence[Tuple[bytes, bytes]],
50 ]
51
52 CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
53
54 CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
55 VerifyTypes = Union[str, bool, ssl.SSLContext]
56 TimeoutTypes = Union[
57 Optional[float],
58 Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
59 "Timeout",
60 ]
61 ProxiesTypes = Union[URLTypes, "Proxy", Dict[URLTypes, Union[None, URLTypes, "Proxy"]]]
62
63 AuthTypes = Union[
64 Tuple[Union[str, bytes], Union[str, bytes]],
65 Callable[["Request"], "Request"],
66 "Auth",
67 None,
68 ]
69
70 ByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]
71 RequestContent = Union[str, bytes, ByteStream]
72 ResponseContent = Union[str, bytes, ByteStream]
73
74 RequestData = dict
75
76 FileContent = Union[IO[str], IO[bytes], str, bytes]
77 FileTypes = Union[
78 # file (or text)
79 FileContent,
80 # (filename, file (or text))
81 Tuple[Optional[str], FileContent],
82 # (filename, file (or text), content_type)
83 Tuple[Optional[str], FileContent, Optional[str]],
84 ]
85 RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/_types.py b/httpx/_types.py
--- a/httpx/_types.py
+++ b/httpx/_types.py
@@ -51,7 +51,14 @@
CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
+CertTypes = Union[
+ # certfile
+ str,
+ # (certfile, keyfile)
+ Tuple[str, Optional[str]],
+ # (certfile, keyfile, password)
+ Tuple[str, Optional[str], Optional[str]],
+]
VerifyTypes = Union[str, bool, ssl.SSLContext]
TimeoutTypes = Union[
Optional[float],
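A quick illustration (hypothetical file paths) of the values the widened alias is meant to admit, mirroring `ssl.SSLContext.load_cert_chain`:

```python
# Hedged sketch; paths are made up.
from httpx._types import CertTypes

cert: CertTypes = "client.pem"             # certfile only
cert = ("client.crt", None)                # keyfile may be None
cert = ("client.crt", "client.key", None)  # password may be None
```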
|
{"golden_diff": "diff --git a/httpx/_types.py b/httpx/_types.py\n--- a/httpx/_types.py\n+++ b/httpx/_types.py\n@@ -51,7 +51,14 @@\n \n CookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n \n-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\n+CertTypes = Union[\n+ # certfile\n+ str,\n+ # (certfile, keyfile)\n+ Tuple[str, Optional[str]],\n+ # (certfile, keyfile, password)\n+ Tuple[str, Optional[str], Optional[str]],\n+]\n VerifyTypes = Union[str, bool, ssl.SSLContext]\n TimeoutTypes = Union[\n Optional[float],\n", "issue": "CertTypes `keyfile` and `password` should be Optional types.\n`SSLContext.load_cert_chain` can take `None` as arguments values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)) so I guess this:\r\nhttps://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54\r\nshould be rewritten as follows:\r\n```python\r\nCertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]] \r\n```\n", "before_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterable,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nRawURL = Tuple[bytes, bytes, Optional[int], bytes]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n None,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[None, URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n None,\n]\n\nByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]\nRequestContent = Union[str, bytes, ByteStream]\nResponseContent = Union[str, bytes, ByteStream]\n\nRequestData = dict\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}], "after_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterable,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from 
._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nRawURL = Tuple[bytes, bytes, Optional[int], bytes]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n None,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nCertTypes = Union[\n # certfile\n str,\n # (certfile, keyfile)\n Tuple[str, Optional[str]],\n # (certfile, keyfile, password)\n Tuple[str, Optional[str], Optional[str]],\n]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[None, URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n None,\n]\n\nByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]\nRequestContent = Union[str, bytes, ByteStream]\nResponseContent = Union[str, bytes, ByteStream]\n\nRequestData = dict\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}]}
| 1,133 | 170 |
gh_patches_debug_14526
|
rasdani/github-patches
|
git_diff
|
pfnet__pytorch-pfn-extras-372
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typing: ManualScheduleTrigger `points` should accept `int`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py`
Content:
```
1 # mypy: ignore-errors
2
3 from typing import List, Union, TYPE_CHECKING
4
5 from pytorch_pfn_extras.training import trigger
6
7
8 if TYPE_CHECKING:
9 from pytorch_pfn_extras.training.manager import _BaseExtensionsManager
10 from pytorch_pfn_extras.training._trigger_util import UnitLiteral
11
12
13 class ManualScheduleTrigger(trigger.Trigger):
14
15 """Trigger invoked at specified point(s) of iterations or epochs.
16
17 This trigger accepts iterations or epochs indicated by given point(s).
18 There are two ways to specify the point(s): iteration and epoch.
19 ``iteration`` means the number of updates, while ``epoch`` means the number
20 of sweeps over the training dataset. Fractional values are allowed
21 if the point is a number of epochs; the trigger uses the ``iteration``
22 and ``epoch_detail`` attributes defined by the manager.
23
24 Args:
25 points (int, float, or list of int or float): time of the trigger.
26 Must be an integer or list of integer if unit is ``'iteration'``.
27 unit (str): Unit of the time specified by ``points``. It must be
28 either ``'iteration'`` or ``'epoch'``.
29
30 """
31
32 def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
33 if unit not in ('epoch', 'iteration'):
34 raise ValueError(
35 'Trigger unit must be either \'epoch\' or \'iteration\'.')
36
37 self.points = (points if isinstance(points, list) else [points])
38 self.unit = unit
39
40 def __call__(self, manager: '_BaseExtensionsManager') -> bool:
41 """Decides whether the extension should be called on this iteration.
42
43 Args:
44 manager (~pytorch_pfn_extras.training.ExtensionsManager):
45 Manager object that this trigger is associated with.
46 The iteration information in this manager is used to
47 determine if the trigger should fire.
48
49 Returns:
50 bool: True if the corresponding extension should be invoked in this
51 iteration.
52
53 """
54 fire = self.may_fire(manager.iteration, manager._iters_per_epoch)
55 return fire
56
57 def may_fire(self, iteration: int, epoch_length: int) -> bool:
58 if self.unit == 'epoch':
59 fire = any(
60 int(p * epoch_length) == iteration for p in self.points)
61 else:
62 fire = any(p == iteration for p in self.points)
63 return fire
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
@@ -1,6 +1,6 @@
 # mypy: ignore-errors
 
-from typing import List, Union, TYPE_CHECKING
+from typing import Sequence, Union, TYPE_CHECKING
 
 from pytorch_pfn_extras.training import trigger
 
@@ -29,7 +29,7 @@
 
     """
 
-    def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
+    def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):
         if unit not in ('epoch', 'iteration'):
             raise ValueError(
                 'Trigger unit must be either \'epoch\' or \'iteration\'.')
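A usage sketch for the patch above (the values are made up). `Sequence[float]` also admits plain ints under PEP 484's numeric tower, but note that `__init__` still only unwraps `list` at runtime, so lists remain the safe container:

```python
from pytorch_pfn_extras.training.triggers.manual_schedule_trigger import (
    ManualScheduleTrigger,
)

# A bare int now satisfies the annotation (int is accepted where float is expected).
t1 = ManualScheduleTrigger(points=1000, unit="iteration")
assert t1.may_fire(iteration=1000, epoch_length=100)

# Fractional epoch points still work; pass a list, since __init__ special-cases list.
t2 = ManualScheduleTrigger(points=[1, 2.5], unit="epoch")
assert t2.may_fire(iteration=250, epoch_length=100)  # int(2.5 * 100) == 250
```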
|
{"golden_diff": "diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n@@ -1,6 +1,6 @@\n # mypy: ignore-errors\n \n-from typing import List, Union, TYPE_CHECKING\n+from typing import Sequence, Union, TYPE_CHECKING\n \n from pytorch_pfn_extras.training import trigger\n \n@@ -29,7 +29,7 @@\n \n \"\"\"\n \n- def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n+ def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n", "issue": "Typing: ManualScheduleTrigger `points` should accept `int`\n\n", "before_files": [{"content": "# mypy: ignore-errors\n\nfrom typing import List, Union, TYPE_CHECKING\n\nfrom pytorch_pfn_extras.training import trigger\n\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras.training.manager import _BaseExtensionsManager\n from pytorch_pfn_extras.training._trigger_util import UnitLiteral\n\n\nclass ManualScheduleTrigger(trigger.Trigger):\n\n \"\"\"Trigger invoked at specified point(s) of iterations or epochs.\n\n This trigger accepts iterations or epochs indicated by given point(s).\n There are two ways to specify the point(s): iteration and epoch.\n ``iteration`` means the number of updates, while ``epoch`` means the number\n of sweeps over the training dataset. Fractional values are allowed\n if the point is a number of epochs; the trigger uses the ``iteration``\n and ``epoch_detail`` attributes defined by the manager.\n\n Args:\n points (int, float, or list of int or float): time of the trigger.\n Must be an integer or list of integer if unit is ``'iteration'``.\n unit (str): Unit of the time specified by ``points``. 
It must be\n either ``'iteration'`` or ``'epoch'``.\n\n \"\"\"\n\n def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n\n self.points = (points if isinstance(points, list) else [points])\n self.unit = unit\n\n def __call__(self, manager: '_BaseExtensionsManager') -> bool:\n \"\"\"Decides whether the extension should be called on this iteration.\n\n Args:\n manager (~pytorch_pfn_extras.training.ExtensionsManager):\n Manager object that this trigger is associated with.\n The iteration information in this manager is used to\n determine if the trigger should fire.\n\n Returns:\n bool: True if the corresponding extension should be invoked in this\n iteration.\n\n \"\"\"\n fire = self.may_fire(manager.iteration, manager._iters_per_epoch)\n return fire\n\n def may_fire(self, iteration: int, epoch_length: int) -> bool:\n if self.unit == 'epoch':\n fire = any(\n int(p * epoch_length) == iteration for p in self.points)\n else:\n fire = any(p == iteration for p in self.points)\n return fire\n", "path": "pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py"}], "after_files": [{"content": "# mypy: ignore-errors\n\nfrom typing import Sequence, Union, TYPE_CHECKING\n\nfrom pytorch_pfn_extras.training import trigger\n\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras.training.manager import _BaseExtensionsManager\n from pytorch_pfn_extras.training._trigger_util import UnitLiteral\n\n\nclass ManualScheduleTrigger(trigger.Trigger):\n\n \"\"\"Trigger invoked at specified point(s) of iterations or epochs.\n\n This trigger accepts iterations or epochs indicated by given point(s).\n There are two ways to specify the point(s): iteration and epoch.\n ``iteration`` means the number of updates, while ``epoch`` means the number\n of sweeps over the training dataset. Fractional values are allowed\n if the point is a number of epochs; the trigger uses the ``iteration``\n and ``epoch_detail`` attributes defined by the manager.\n\n Args:\n points (int, float, or list of int or float): time of the trigger.\n Must be an integer or list of integer if unit is ``'iteration'``.\n unit (str): Unit of the time specified by ``points``. It must be\n either ``'iteration'`` or ``'epoch'``.\n\n \"\"\"\n\n def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n\n self.points = (points if isinstance(points, list) else [points])\n self.unit = unit\n\n def __call__(self, manager: '_BaseExtensionsManager') -> bool:\n \"\"\"Decides whether the extension should be called on this iteration.\n\n Args:\n manager (~pytorch_pfn_extras.training.ExtensionsManager):\n Manager object that this trigger is associated with.\n The iteration information in this manager is used to\n determine if the trigger should fire.\n\n Returns:\n bool: True if the corresponding extension should be invoked in this\n iteration.\n\n \"\"\"\n fire = self.may_fire(manager.iteration, manager._iters_per_epoch)\n return fire\n\n def may_fire(self, iteration: int, epoch_length: int) -> bool:\n if self.unit == 'epoch':\n fire = any(\n int(p * epoch_length) == iteration for p in self.points)\n else:\n fire = any(p == iteration for p in self.points)\n return fire\n", "path": "pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py"}]}
| 931 | 210 |
gh_patches_debug_21898
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-2008
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings
In Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (beyond the allowed ones, of course) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.
However, it is unclear from the warning which class/function was invoked; it just says "calls [with more than N] positional args are deprecated". Brought up by @laurent-chriqui (see the linked PR).
Ideally, as a developer, I would like the warning to read along the lines of
```
DeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/util/deprecation.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Miscellaneous deprecation utilities.
16
17 This module provides decorators to mark functions and classes as deprecated.
18 """
19
20 import functools
21 import warnings
22
23
24 __all__ = (
25 'DeprecatedWarning',
26 'deprecated',
27 'deprecated_args',
28 )
29
30
31 # NOTE(kgriffs): We don't want our deprecations to be ignored by default,
32 # so create our own type.
33 #
34 # TODO(kgriffs): Revisit this decision if users complain.
35 class DeprecatedWarning(UserWarning):
36 pass
37
38
39 def deprecated(instructions, is_property=False, method_name=None):
40 """Flag a method as deprecated.
41
42 This function returns a decorator which can be used to mark deprecated
43 functions. Applying this decorator will result in a warning being
44 emitted when the function is used.
45
46 Args:
47 instructions (str): Specific guidance for the developer, e.g.:
48 'Please migrate to add_proxy(...)'.
49 is_property (bool): If the deprecated object is a property. It
50 will omit the ``(...)`` from the generated documentation.
51 method_name (str, optional): Set to override the name of the
52 deprecated function or property in the generated
53 documentation (default ``None``). This is useful when
54 decorating an alias that carries the target's ``__name__``.
55
56 """
57
58 def decorator(func):
59
60 object_name = 'property' if is_property else 'function'
61 post_name = '' if is_property else '(...)'
62 message = 'Call to deprecated {} {}{}. {}'.format(
63 object_name, method_name or func.__name__, post_name, instructions
64 )
65
66 @functools.wraps(func)
67 def wrapper(*args, **kwargs):
68 warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
69
70 return func(*args, **kwargs)
71
72 return wrapper
73
74 return decorator
75
76
77 def deprecated_args(*, allowed_positional, is_method=True):
78 """Flag a method call with positional args as deprecated.
79
80 Keyword Args:
81 allowed_positional (int): Number of allowed positional arguments
82 is_method (bool, optional): The decorated function is a method. Will
83 add one to the number of allowed positional args to account for
84 ``self``. Defaults to True.
85 """
86
87 template = (
88 'Calls with{} positional args are deprecated.'
89 ' Please specify them as keyword arguments instead.'
90 )
91 text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
92 warn_text = template.format(text)
93 if is_method:
94 allowed_positional += 1
95
96 def deprecated_args(fn):
97 @functools.wraps(fn)
98 def wraps(*args, **kwargs):
99 if len(args) > allowed_positional:
100 warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
101 return fn(*args, **kwargs)
102
103 return wraps
104
105 return deprecated_args
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py
--- a/falcon/util/deprecation.py
+++ b/falcon/util/deprecation.py
@@ -85,11 +85,11 @@
     """
 
    template = (
-        'Calls with{} positional args are deprecated.'
+        'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'
         ' Please specify them as keyword arguments instead.'
    )
    text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
-    warn_text = template.format(text)
+    warn_text = template.format(arg_text=text)
    if is_method:
        allowed_positional += 1
 
@@ -97,7 +97,11 @@
        @functools.wraps(fn)
        def wraps(*args, **kwargs):
            if len(args) > allowed_positional:
-                warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
+                warnings.warn(
+                    warn_text.format(fn=fn.__qualname__),
+                    DeprecatedWarning,
+                    stacklevel=2,
+                )
            return fn(*args, **kwargs)
 
        return wraps
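To see the improved message in action, the patched decorator can be applied to a stand-in class (a sketch; `HTTPNotFound` below is a dummy, not Falcon's real error class):

```python
import warnings

from falcon.util.deprecation import deprecated_args

class HTTPNotFound:  # dummy stand-in for the real error class
    @deprecated_args(allowed_positional=0)
    def __init__(self, title=None, description=None):
        self.title, self.description = title, description

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    HTTPNotFound("Nope", "resource missing")  # two disallowed positionals

# fn.__qualname__ now names the offender:
# "Calls to HTTPNotFound.__init__(...) with positional args are deprecated. ..."
print(caught[0].message)
```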
|
{"golden_diff": "diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py\n--- a/falcon/util/deprecation.py\n+++ b/falcon/util/deprecation.py\n@@ -85,11 +85,11 @@\n \"\"\"\n \n template = (\n- 'Calls with{} positional args are deprecated.'\n+ 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n- warn_text = template.format(text)\n+ warn_text = template.format(arg_text=text)\n if is_method:\n allowed_positional += 1\n \n@@ -97,7 +97,11 @@\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n- warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n+ warnings.warn(\n+ warn_text.format(fn=fn.__qualname__),\n+ DeprecatedWarning,\n+ stacklevel=2,\n+ )\n return fn(*args, **kwargs)\n \n return wraps\n", "issue": "Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings\nIn Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (of course except the allowed ones) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.\r\n\r\nHowever, it is unclear from the warning which class/function was invoked, it just says \"calls [with more than N] positional args are deprecated\". Brought up by @laurent-chriqui (see the linked PR).\r\n\r\nIdeally, as a developer, I would like the warning to read along the lines of\r\n```\r\nDeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.\r\n```\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous deprecation utilities.\n\nThis module provides decorators to mark functions and classes as deprecated.\n\"\"\"\n\nimport functools\nimport warnings\n\n\n__all__ = (\n 'DeprecatedWarning',\n 'deprecated',\n 'deprecated_args',\n)\n\n\n# NOTE(kgriffs): We don't want our deprecations to be ignored by default,\n# so create our own type.\n#\n# TODO(kgriffs): Revisit this decision if users complain.\nclass DeprecatedWarning(UserWarning):\n pass\n\n\ndef deprecated(instructions, is_property=False, method_name=None):\n \"\"\"Flag a method as deprecated.\n\n This function returns a decorator which can be used to mark deprecated\n functions. Applying this decorator will result in a warning being\n emitted when the function is used.\n\n Args:\n instructions (str): Specific guidance for the developer, e.g.:\n 'Please migrate to add_proxy(...)'.\n is_property (bool): If the deprecated object is a property. It\n will omit the ``(...)`` from the generated documentation.\n method_name (str, optional): Set to override the name of the\n deprecated function or property in the generated\n documentation (default ``None``). 
This is useful when\n decorating an alias that carries the target's ``__name__``.\n\n \"\"\"\n\n def decorator(func):\n\n object_name = 'property' if is_property else 'function'\n post_name = '' if is_property else '(...)'\n message = 'Call to deprecated {} {}{}. {}'.format(\n object_name, method_name or func.__name__, post_name, instructions\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(message, category=DeprecatedWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef deprecated_args(*, allowed_positional, is_method=True):\n \"\"\"Flag a method call with positional args as deprecated.\n\n Keyword Args:\n allowed_positional (int): Number of allowed positional arguments\n is_method (bool, optional): The decorated function is a method. Will\n add one to the number of allowed positional args to account for\n ``self``. Defaults to True.\n \"\"\"\n\n template = (\n 'Calls with{} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n warn_text = template.format(text)\n if is_method:\n allowed_positional += 1\n\n def deprecated_args(fn):\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n return fn(*args, **kwargs)\n\n return wraps\n\n return deprecated_args\n", "path": "falcon/util/deprecation.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous deprecation utilities.\n\nThis module provides decorators to mark functions and classes as deprecated.\n\"\"\"\n\nimport functools\nimport warnings\n\n\n__all__ = (\n 'DeprecatedWarning',\n 'deprecated',\n 'deprecated_args',\n)\n\n\n# NOTE(kgriffs): We don't want our deprecations to be ignored by default,\n# so create our own type.\n#\n# TODO(kgriffs): Revisit this decision if users complain.\nclass DeprecatedWarning(UserWarning):\n pass\n\n\ndef deprecated(instructions, is_property=False, method_name=None):\n \"\"\"Flag a method as deprecated.\n\n This function returns a decorator which can be used to mark deprecated\n functions. Applying this decorator will result in a warning being\n emitted when the function is used.\n\n Args:\n instructions (str): Specific guidance for the developer, e.g.:\n 'Please migrate to add_proxy(...)'.\n is_property (bool): If the deprecated object is a property. It\n will omit the ``(...)`` from the generated documentation.\n method_name (str, optional): Set to override the name of the\n deprecated function or property in the generated\n documentation (default ``None``). This is useful when\n decorating an alias that carries the target's ``__name__``.\n\n \"\"\"\n\n def decorator(func):\n\n object_name = 'property' if is_property else 'function'\n post_name = '' if is_property else '(...)'\n message = 'Call to deprecated {} {}{}. 
{}'.format(\n object_name, method_name or func.__name__, post_name, instructions\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(message, category=DeprecatedWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef deprecated_args(*, allowed_positional, is_method=True):\n \"\"\"Flag a method call with positional args as deprecated.\n\n Keyword Args:\n allowed_positional (int): Number of allowed positional arguments\n is_method (bool, optional): The decorated function is a method. Will\n add one to the number of allowed positional args to account for\n ``self``. Defaults to True.\n \"\"\"\n\n template = (\n 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n warn_text = template.format(arg_text=text)\n if is_method:\n allowed_positional += 1\n\n def deprecated_args(fn):\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n warnings.warn(\n warn_text.format(fn=fn.__qualname__),\n DeprecatedWarning,\n stacklevel=2,\n )\n return fn(*args, **kwargs)\n\n return wraps\n\n return deprecated_args\n", "path": "falcon/util/deprecation.py"}]}
| 1,376 | 255 |
gh_patches_debug_3352
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-3395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `author_email` in setup.py
https://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83
@gravyboat
you registered `[email protected]` a couple of years ago, right? Can this be used instead?
What's the email address of the `streamlink` account on pypi?
https://pypi.org/user/streamlink/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 deps = [
12 "requests>=2.21.0,<3.0",
13 "isodate",
14 "websocket-client",
15 # Support for SOCKS proxies
16 "PySocks!=1.5.7,>=1.5.6",
17 ]
18
19 # for encrypted streams
20 if environ.get("STREAMLINK_USE_PYCRYPTO"):
21 deps.append("pycrypto")
22 else:
23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
24 deps.append("pycryptodome>=3.4.3,<4")
25
26 # for localization
27 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
28 deps.append("pycountry")
29 else:
30 deps.append("iso-639")
31 deps.append("iso3166")
32
33 # When we build an egg for the Win32 bootstrap we don"t want dependency
34 # information built into it.
35 if environ.get("NO_DEPS"):
36 deps = []
37
38 this_directory = path.abspath(path.dirname(__file__))
39 srcdir = path.join(this_directory, "src/")
40 sys_path.insert(0, srcdir)
41
42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
43 long_description = f.read()
44
45
46 def is_wheel_for_windows():
47 if "bdist_wheel" in argv:
48 names = ["win32", "win-amd64", "cygwin"]
49 length = len(argv)
50 for pos in range(argv.index("bdist_wheel") + 1, length):
51 if argv[pos] == "--plat-name" and pos + 1 < length:
52 return argv[pos + 1] in names
53 elif argv[pos][:12] == "--plat-name=":
54 return argv[pos][12:] in names
55 return False
56
57
58 entry_points = {
59 "console_scripts": ["streamlink=streamlink_cli.main:main"]
60 }
61
62 if is_wheel_for_windows():
63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
64
65
66 setup(name="streamlink",
67 version=versioneer.get_version(),
68 cmdclass=versioneer.get_cmdclass(),
69 description="Streamlink is a command-line utility that extracts streams "
70 "from various services and pipes them into a video player of "
71 "choice.",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/streamlink/streamlink",
75 project_urls={
76 "Documentation": "https://streamlink.github.io/",
77 "Tracker": "https://github.com/streamlink/streamlink/issues",
78 "Source": "https://github.com/streamlink/streamlink",
79 "Funding": "https://opencollective.com/streamlink"
80 },
81 author="Streamlink",
82 # temp until we have a mailing list / global email
83 author_email="[email protected]",
84 license="Simplified BSD",
85 packages=find_packages("src"),
86 package_dir={"": "src"},
87 entry_points=entry_points,
88 install_requires=deps,
89 test_suite="tests",
90 python_requires=">=3.6, <4",
91 classifiers=["Development Status :: 5 - Production/Stable",
92 "License :: OSI Approved :: BSD License",
93 "Environment :: Console",
94 "Intended Audience :: End Users/Desktop",
95 "Operating System :: POSIX",
96 "Operating System :: Microsoft :: Windows",
97 "Operating System :: MacOS",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3 :: Only",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Topic :: Internet :: WWW/HTTP",
105 "Topic :: Multimedia :: Sound/Audio",
106 "Topic :: Multimedia :: Video",
107 "Topic :: Utilities"])
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
       },
       author="Streamlink",
       # temp until we have a mailing list / global email
-      author_email="[email protected]",
+      author_email="[email protected]",
       license="Simplified BSD",
       packages=find_packages("src"),
       package_dir={"": "src"},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n- author_email=\"[email protected]\",\n+ author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n", "issue": "Change `author_email` in setup.py\nhttps://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83\r\n\r\n@gravyboat \r\nyou've registered `[email protected]` a couple of years ago, right? Can this be used instead?\r\n\r\nWhat's the email address of the `streamlink` account on pypi?\r\nhttps://pypi.org/user/streamlink/\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n 
\"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: 
POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
| 1,484 | 102 |
gh_patches_debug_33078
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-1641
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Signals integration breaks on partial objects for python <3.10
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.9
### Steps to Reproduce
1. Use python older than 3.10.
2. Register a partial function as a signal handler.
### Expected Result
Signal is traced correctly.
### Actual Result
An exception is raised from the `_get_receiver_name` function, because `partial` objects don't have `__module__` before Python 3.10 (and even there it's undocumented, from what I can see).
It fails in our tests, where we don't even register any signals, so either Django itself or some kind of integration (Sentry?) registers such signals by default.
The whole signals integration is missing a `capture_internal_exceptions` context too, I believe.
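A quick repro of the attribute gap described above (behavior as reported for CPython before 3.10; the handler is a placeholder):

```python
import functools

def handler(sender, **kwargs):
    pass

p = functools.partial(handler, extra=1)
print(hasattr(p, "__name__"))    # False -- partials never carry __name__
print(hasattr(p, "__module__"))  # False before Python 3.10, per the report above
```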
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/signals_handlers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 from django.dispatch import Signal
5
6 from sentry_sdk import Hub
7 from sentry_sdk._types import MYPY
8
9
10 if MYPY:
11 from typing import Any
12 from typing import Callable
13 from typing import List
14
15
16 def patch_signals():
17 # type: () -> None
18 """Patch django signal receivers to create a span"""
19
20 old_live_receivers = Signal._live_receivers
21
22 def _get_receiver_name(receiver):
23 # type: (Callable[..., Any]) -> str
24 name = receiver.__module__ + "."
25 if hasattr(receiver, "__name__"):
26 return name + receiver.__name__
27 return name + str(receiver)
28
29 def _sentry_live_receivers(self, sender):
30 # type: (Signal, Any) -> List[Callable[..., Any]]
31 hub = Hub.current
32 receivers = old_live_receivers(self, sender)
33
34 def sentry_receiver_wrapper(receiver):
35 # type: (Callable[..., Any]) -> Callable[..., Any]
36 def wrapper(*args, **kwargs):
37 # type: (Any, Any) -> Any
38 with hub.start_span(
39 op="django.signals",
40 description=_get_receiver_name(receiver),
41 ) as span:
42 span.set_data("signal", _get_receiver_name(receiver))
43 return receiver(*args, **kwargs)
44
45 return wrapper
46
47 for idx, receiver in enumerate(receivers):
48 receivers[idx] = sentry_receiver_wrapper(receiver)
49
50 return receivers
51
52 Signal._live_receivers = _sentry_live_receivers
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py
--- a/sentry_sdk/integrations/django/signals_handlers.py
+++ b/sentry_sdk/integrations/django/signals_handlers.py
@@ -13,19 +13,32 @@
     from typing import List
 
 
+def _get_receiver_name(receiver):
+    # type: (Callable[..., Any]) -> str
+    name = ""
+
+    if hasattr(receiver, "__qualname__"):
+        name += receiver.__qualname__
+    elif hasattr(receiver, "__name__"):  # Python 2.7 has no __qualname__
+        name += receiver.__name__
+
+    if (
+        name == ""
+    ):  # certain functions (like partials) dont have a name so return the string representation
+        return str(receiver)
+
+    if hasattr(receiver, "__module__"):  # prepend with module, if there is one
+        name = receiver.__module__ + "." + name
+
+    return name
+
+
 def patch_signals():
     # type: () -> None
     """Patch django signal receivers to create a span"""
 
     old_live_receivers = Signal._live_receivers
 
-    def _get_receiver_name(receiver):
-        # type: (Callable[..., Any]) -> str
-        name = receiver.__module__ + "."
-        if hasattr(receiver, "__name__"):
-            return name + receiver.__name__
-        return name + str(receiver)
-
     def _sentry_live_receivers(self, sender):
         # type: (Signal, Any) -> List[Callable[..., Any]]
         hub = Hub.current
@@ -35,11 +48,12 @@
             # type: (Callable[..., Any]) -> Callable[..., Any]
             def wrapper(*args, **kwargs):
                 # type: (Any, Any) -> Any
+                signal_name = _get_receiver_name(receiver)
                 with hub.start_span(
                     op="django.signals",
-                    description=_get_receiver_name(receiver),
+                    description=signal_name,
                 ) as span:
-                    span.set_data("signal", _get_receiver_name(receiver))
+                    span.set_data("signal", signal_name)
                     return receiver(*args, **kwargs)
 
             return wrapper
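With the patch applied, the now module-level helper degrades gracefully for receivers that lack the usual metadata (a sketch; importing the private helper assumes Django and sentry-sdk are installed):

```python
import functools

from sentry_sdk.integrations.django.signals_handlers import _get_receiver_name

def my_receiver(sender, **kwargs):
    pass

print(_get_receiver_name(my_receiver))
# e.g. "__main__.my_receiver" -- __qualname__ prefixed with __module__

print(_get_receiver_name(functools.partial(my_receiver)))
# no __qualname__/__name__, so it falls back to str(receiver):
# "functools.partial(<function my_receiver at 0x...>)"
```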
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py\n--- a/sentry_sdk/integrations/django/signals_handlers.py\n+++ b/sentry_sdk/integrations/django/signals_handlers.py\n@@ -13,19 +13,32 @@\n from typing import List\n \n \n+def _get_receiver_name(receiver):\n+ # type: (Callable[..., Any]) -> str\n+ name = \"\"\n+\n+ if hasattr(receiver, \"__qualname__\"):\n+ name += receiver.__qualname__\n+ elif hasattr(receiver, \"__name__\"): # Python 2.7 has no __qualname__\n+ name += receiver.__name__\n+\n+ if (\n+ name == \"\"\n+ ): # certain functions (like partials) dont have a name so return the string representation\n+ return str(receiver)\n+\n+ if hasattr(receiver, \"__module__\"): # prepend with module, if there is one\n+ name = receiver.__module__ + \".\" + name\n+\n+ return name\n+\n+\n def patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n \n old_live_receivers = Signal._live_receivers\n \n- def _get_receiver_name(receiver):\n- # type: (Callable[..., Any]) -> str\n- name = receiver.__module__ + \".\"\n- if hasattr(receiver, \"__name__\"):\n- return name + receiver.__name__\n- return name + str(receiver)\n-\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n@@ -35,11 +48,12 @@\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n+ signal_name = _get_receiver_name(receiver)\n with hub.start_span(\n op=\"django.signals\",\n- description=_get_receiver_name(receiver),\n+ description=signal_name,\n ) as span:\n- span.set_data(\"signal\", _get_receiver_name(receiver))\n+ span.set_data(\"signal\", signal_name)\n return receiver(*args, **kwargs)\n \n return wrapper\n", "issue": "Django Signals integration breaks on partial objects for python <3.10\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.9\n\n### Steps to Reproduce\n\n1. Use python older than 3.10.\r\n2. Register a partial function as a signal handler.\n\n### Expected Result\n\nSignal is traced correctly.\n\n### Actual Result\n\nException is raised from `_get_receiver_name` function as `partial` objects don't have `__module__` before python 3.10 (and even there it's undocumented from what I can see).\r\n\r\nIt fails in our tests where we don't even register any signals so either Django itself or some kind of integration (Sentry?) 
registers such signals by default.\r\n\r\nThe whole signals integration is missing a `capture_internal_exceptions` context too I believe.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.dispatch import Signal\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import List\n\n\ndef patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n\n old_live_receivers = Signal._live_receivers\n\n def _get_receiver_name(receiver):\n # type: (Callable[..., Any]) -> str\n name = receiver.__module__ + \".\"\n if hasattr(receiver, \"__name__\"):\n return name + receiver.__name__\n return name + str(receiver)\n\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n receivers = old_live_receivers(self, sender)\n\n def sentry_receiver_wrapper(receiver):\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n with hub.start_span(\n op=\"django.signals\",\n description=_get_receiver_name(receiver),\n ) as span:\n span.set_data(\"signal\", _get_receiver_name(receiver))\n return receiver(*args, **kwargs)\n\n return wrapper\n\n for idx, receiver in enumerate(receivers):\n receivers[idx] = sentry_receiver_wrapper(receiver)\n\n return receivers\n\n Signal._live_receivers = _sentry_live_receivers\n", "path": "sentry_sdk/integrations/django/signals_handlers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.dispatch import Signal\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import List\n\n\ndef _get_receiver_name(receiver):\n # type: (Callable[..., Any]) -> str\n name = \"\"\n\n if hasattr(receiver, \"__qualname__\"):\n name += receiver.__qualname__\n elif hasattr(receiver, \"__name__\"): # Python 2.7 has no __qualname__\n name += receiver.__name__\n\n if (\n name == \"\"\n ): # certain functions (like partials) dont have a name so return the string representation\n return str(receiver)\n\n if hasattr(receiver, \"__module__\"): # prepend with module, if there is one\n name = receiver.__module__ + \".\" + name\n\n return name\n\n\ndef patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n\n old_live_receivers = Signal._live_receivers\n\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n receivers = old_live_receivers(self, sender)\n\n def sentry_receiver_wrapper(receiver):\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n signal_name = _get_receiver_name(receiver)\n with hub.start_span(\n op=\"django.signals\",\n description=signal_name,\n ) as span:\n span.set_data(\"signal\", signal_name)\n return receiver(*args, **kwargs)\n\n return wrapper\n\n for idx, receiver in enumerate(receivers):\n receivers[idx] = sentry_receiver_wrapper(receiver)\n\n return receivers\n\n Signal._live_receivers = _sentry_live_receivers\n", "path": "sentry_sdk/integrations/django/signals_handlers.py"}]}
| 888 | 506 |
gh_patches_debug_9131
|
rasdani/github-patches
|
git_diff
|
dask__dask-7623
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Note on `Add x.str.cat (#3028)` (lines 125-126)
It's not clear to me why `Series.str.cat` isn't supported in Dask when `others == None`. Not having the ability to concatenate a single series into a string is a significant cost, I think. Unless there's a reason for this condition, I recommend deleting lines 125-126.
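For comparison, the pandas call the reporter wants mirrored, next to the Dask call that used to raise (a hypothetical session with made-up values):

```python
import pandas as pd
import dask.dataframe as dd

s = pd.Series(["a", "b", "c"])
print(s.str.cat(sep="-"))  # pandas: "a-b-c"

ds = dd.from_pandas(s, npartitions=2)
ds.str.cat(sep="-")  # before the patch: raises NotImplementedError
```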
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/dataframe/accessor.py`
Content:
```
1 from functools import partial
2
3 import numpy as np
4 import pandas as pd
5
6 from ..utils import derived_from
7
8
9 def maybe_wrap_pandas(obj, x):
10 if isinstance(x, np.ndarray):
11 if isinstance(obj, pd.Series):
12 return pd.Series(x, index=obj.index, dtype=x.dtype)
13 return pd.Index(x)
14 return x
15
16
17 class Accessor:
18 """
19 Base class for pandas Accessor objects cat, dt, and str.
20
21 Notes
22 -----
23 Subclasses should define ``_accessor_name``
24 """
25
26 _not_implemented = set()
27
28 def __init__(self, series):
29 from .core import Series
30
31 if not isinstance(series, Series):
32 raise ValueError("Accessor cannot be initialized")
33
34 series_meta = series._meta
35 if hasattr(series_meta, "to_series"): # is index-like
36 series_meta = series_meta.to_series()
37 meta = getattr(series_meta, self._accessor_name)
38
39 self._meta = meta
40 self._series = series
41
42 @staticmethod
43 def _delegate_property(obj, accessor, attr):
44 out = getattr(getattr(obj, accessor, obj), attr)
45 return maybe_wrap_pandas(obj, out)
46
47 @staticmethod
48 def _delegate_method(obj, accessor, attr, args, kwargs):
49 out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)
50 return maybe_wrap_pandas(obj, out)
51
52 def _property_map(self, attr):
53 meta = self._delegate_property(self._series._meta, self._accessor_name, attr)
54 token = "%s-%s" % (self._accessor_name, attr)
55 return self._series.map_partitions(
56 self._delegate_property, self._accessor_name, attr, token=token, meta=meta
57 )
58
59 def _function_map(self, attr, *args, **kwargs):
60 if "meta" in kwargs:
61 meta = kwargs.pop("meta")
62 else:
63 meta = self._delegate_method(
64 self._series._meta_nonempty, self._accessor_name, attr, args, kwargs
65 )
66 token = "%s-%s" % (self._accessor_name, attr)
67 return self._series.map_partitions(
68 self._delegate_method,
69 self._accessor_name,
70 attr,
71 args,
72 kwargs,
73 meta=meta,
74 token=token,
75 )
76
77 @property
78 def _delegates(self):
79 return set(dir(self._meta)) - self._not_implemented
80
81 def __dir__(self):
82 o = self._delegates
83 o.update(self.__dict__)
84 o.update(dir(type(self)))
85 return list(o)
86
87 def __getattr__(self, key):
88 if key in self._delegates:
89 if callable(getattr(self._meta, key)):
90 return partial(self._function_map, key)
91 else:
92 return self._property_map(key)
93 else:
94 raise AttributeError(key)
95
96
97 class DatetimeAccessor(Accessor):
98 """Accessor object for datetimelike properties of the Series values.
99
100 Examples
101 --------
102
103 >>> s.dt.microsecond # doctest: +SKIP
104 """
105
106 _accessor_name = "dt"
107
108
109 class StringAccessor(Accessor):
110 """Accessor object for string properties of the Series values.
111
112 Examples
113 --------
114
115 >>> s.str.lower() # doctest: +SKIP
116 """
117
118 _accessor_name = "str"
119 _not_implemented = {"get_dummies"}
120
121 @derived_from(pd.core.strings.StringMethods)
122 def split(self, pat=None, n=-1, expand=False):
123 if expand:
124 if n == -1:
125 raise NotImplementedError(
126 "To use the expand parameter you must specify the number of "
127 "expected splits with the n= parameter. Usually n splits result in n+1 output columns."
128 )
129 else:
130 delimiter = " " if pat is None else pat
131 meta = self._series._meta._constructor(
132 [delimiter.join(["a"] * (n + 1))],
133 index=self._series._meta_nonempty[:1].index,
134 )
135 meta = meta.str.split(n=n, expand=expand, pat=pat)
136 else:
137 meta = (self._series.name, object)
138 return self._function_map("split", pat=pat, n=n, expand=expand, meta=meta)
139
140 @derived_from(pd.core.strings.StringMethods)
141 def cat(self, others=None, sep=None, na_rep=None):
142 from .core import Index, Series
143
144 if others is None:
145 raise NotImplementedError("x.str.cat() with `others == None`")
146
147 valid_types = (Series, Index, pd.Series, pd.Index)
148 if isinstance(others, valid_types):
149 others = [others]
150 elif not all(isinstance(a, valid_types) for a in others):
151 raise TypeError("others must be Series/Index")
152
153 return self._series.map_partitions(
154 str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta
155 )
156
157 @derived_from(pd.core.strings.StringMethods)
158 def extractall(self, pat, flags=0):
159 return self._series.map_partitions(
160 str_extractall, pat, flags, token="str-extractall"
161 )
162
163 def __getitem__(self, index):
164 return self._series.map_partitions(str_get, index, meta=self._series._meta)
165
166
167 def str_extractall(series, pat, flags):
168 return series.str.extractall(pat, flags=flags)
169
170
171 def str_get(series, index):
172 """ Implements series.str[index] """
173 return series.str[index]
174
175
176 def str_cat(self, *others, **kwargs):
177 return self.str.cat(others=others, **kwargs)
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py
--- a/dask/dataframe/accessor.py
+++ b/dask/dataframe/accessor.py
@@ -142,7 +142,15 @@
         from .core import Index, Series
 
         if others is None:
-            raise NotImplementedError("x.str.cat() with `others == None`")
+
+            def str_cat_none(x):
+
+                if isinstance(x, (Series, Index)):
+                    x = x.compute()
+
+                return x.str.cat(sep=sep, na_rep=na_rep)
+
+            return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)
 
         valid_types = (Series, Index, pd.Series, pd.Index)
         if isinstance(others, valid_types):
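With the reduction in place, the `others=None` path composes per partition: each chunk is joined with `sep`, and the aggregate joins the chunk results with the same `sep`, so the separator count comes out right (a sketch; the partition split is assumed):

```python
import pandas as pd
import dask.dataframe as dd

ds = dd.from_pandas(pd.Series(["a", "b", "c"]), npartitions=2)
result = ds.str.cat(sep="-")  # now a lazy scalar instead of an error

# chunks: ["a", "b"] -> "a-b" and ["c"] -> "c"; aggregate joins "a-b" with "c"
print(result.compute())  # "a-b-c"
```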
|
{"golden_diff": "diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py\n--- a/dask/dataframe/accessor.py\n+++ b/dask/dataframe/accessor.py\n@@ -142,7 +142,15 @@\n from .core import Index, Series\n \n if others is None:\n- raise NotImplementedError(\"x.str.cat() with `others == None`\")\n+\n+ def str_cat_none(x):\n+\n+ if isinstance(x, (Series, Index)):\n+ x = x.compute()\n+\n+ return x.str.cat(sep=sep, na_rep=na_rep)\n+\n+ return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)\n \n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n", "issue": "Note on `Add x.str.cat (#3028)` (lines 125-126)\nIt's not clear to me why `String.str.cat` isn't supported in Dask when `others == None`. Not having the ability to concat a single series into a string is a significant cost, I think. Unless there's a reason for this condition, I recommend deleting lines 126-126.\r\n\n", "before_files": [{"content": "from functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import derived_from\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x\n\n\nclass Accessor:\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``\n \"\"\"\n\n _not_implemented = set()\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )\n\n @property\n def _delegates(self):\n return set(dir(self._meta)) - self._not_implemented\n\n def __dir__(self):\n o = self._delegates\n o.update(self.__dict__)\n o.update(dir(type(self)))\n return list(o)\n\n def __getattr__(self, key):\n if key in self._delegates:\n if callable(getattr(self._meta, key)):\n return partial(self._function_map, key)\n else:\n return self._property_map(key)\n else:\n raise AttributeError(key)\n\n\nclass DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"\n\n\nclass 
StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n _not_implemented = {\"get_dummies\"}\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. Usually n splits result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = self._series._meta._constructor(\n [delimiter.join([\"a\"] * (n + 1))],\n index=self._series._meta_nonempty[:1].index,\n )\n meta = meta.str.split(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(\"split\", pat=pat, n=n, expand=expand, meta=meta)\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Index, Series\n\n if others is None:\n raise NotImplementedError(\"x.str.cat() with `others == None`\")\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n return self._series.map_partitions(\n str_extractall, pat, flags, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)\n\n\ndef str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\" Implements series.str[index] \"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)\n", "path": "dask/dataframe/accessor.py"}], "after_files": [{"content": "from functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import derived_from\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x\n\n\nclass Accessor:\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``\n \"\"\"\n\n _not_implemented = set()\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n 
self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )\n\n @property\n def _delegates(self):\n return set(dir(self._meta)) - self._not_implemented\n\n def __dir__(self):\n o = self._delegates\n o.update(self.__dict__)\n o.update(dir(type(self)))\n return list(o)\n\n def __getattr__(self, key):\n if key in self._delegates:\n if callable(getattr(self._meta, key)):\n return partial(self._function_map, key)\n else:\n return self._property_map(key)\n else:\n raise AttributeError(key)\n\n\nclass DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"\n\n\nclass StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n _not_implemented = {\"get_dummies\"}\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. Usually n splits result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = self._series._meta._constructor(\n [delimiter.join([\"a\"] * (n + 1))],\n index=self._series._meta_nonempty[:1].index,\n )\n meta = meta.str.split(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(\"split\", pat=pat, n=n, expand=expand, meta=meta)\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Index, Series\n\n if others is None:\n\n def str_cat_none(x):\n\n if isinstance(x, (Series, Index)):\n x = x.compute()\n\n return x.str.cat(sep=sep, na_rep=na_rep)\n\n return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n return self._series.map_partitions(\n str_extractall, pat, flags, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)\n\n\ndef str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\" Implements series.str[index] \"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)\n", "path": "dask/dataframe/accessor.py"}]}
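
Editor's note on the record above: the patch makes `Series.str.cat()` with `others=None` reduce the partitioned series to a single string instead of raising `NotImplementedError`. A minimal usage sketch, assuming a dask release that includes this fix:

```python
# Sketch of the user-facing behaviour enabled by the dask patch above;
# assumes a dask version that ships the str.cat(others=None) fix.
import dask.dataframe as dd
import pandas as pd

s = dd.from_pandas(pd.Series(["a", "b", "c"]), npartitions=2)

# Each partition is concatenated by the chunk function, then the
# per-partition strings are joined again by the aggregate step.
assert s.str.cat(sep="-").compute() == "a-b-c"
```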
| 2,032 | 176 |
gh_patches_debug_43
|
rasdani/github-patches
|
git_diff
|
python-discord__site-268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ugly prefix on all ID links.
Currently, all the headers created by the wiki have IDs prefixed with `wiki-toc`. As such, when you want to link to a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.
It would be better if this simply said `#development-environment`, so let's change that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py
--- a/pydis_site/__init__.py
+++ b/pydis_site/__init__.py
@@ -0,0 +1,4 @@
+from wiki.plugins.macros.mdx import toc
+
+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.
+toc.HEADER_ID_PREFIX = ''
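
A standalone sketch, not django-wiki's actual implementation, of why the prefix appears in anchor links and what emptying it changes; the `wiki-toc-` default value is inferred from the issue:

```python
# Toy model of TOC id generation; the real logic lives in the markdown
# toc extension that django-wiki wraps.
HEADER_ID_PREFIX = "wiki-toc-"  # assumed default that the monkey patch clears

def header_id(title: str, prefix: str = HEADER_ID_PREFIX) -> str:
    # Rough slugify: lowercase and replace spaces with hyphens.
    return prefix + title.lower().replace(" ", "-")

assert header_id("Development Environment") == "wiki-toc-development-environment"
assert header_id("Development Environment", prefix="") == "development-environment"
```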
|
{"golden_diff": "diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py\n--- a/pydis_site/__init__.py\n+++ b/pydis_site/__init__.py\n@@ -0,0 +1,4 @@\n+from wiki.plugins.macros.mdx import toc\n+\n+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\n+toc.HEADER_ID_PREFIX = ''\n", "issue": "Ugly prefix on all ID links.\nCurrently, all the headers that are created by the wiki will have id's that are prefixed with `wiki-toc`. As such, when you want to link a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.\r\n\r\nIt would be better if this simply said `#development-environment`, so let's change that.\n", "before_files": [{"content": "", "path": "pydis_site/__init__.py"}], "after_files": [{"content": "from wiki.plugins.macros.mdx import toc\n\n# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\ntoc.HEADER_ID_PREFIX = ''\n", "path": "pydis_site/__init__.py"}]}
| 350 | 91 |
gh_patches_debug_8884
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-3421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Audit log crash on IPv6
Affected version: 8.5.0
I was just saving some preferences and faced this error (a French-locale PostgreSQL error; in English: invalid input syntax for type inet: « 2001 »):
```
DataError: ERREUR: syntaxe en entrée invalide pour le type inet : « 2001 »
LINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...
^
SQL: INSERT INTO "sentry_auditlogentry" ("organization_id", "actor_label", "actor_id", "actor_key_id", "target_object", "target_user_id", "event", "ip_address", "data", "datetime") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING "sentry_auditlogentry"."id"
```
Looks like IPv6 addresses are not handled properly.
Also reproduced by deleting a project.
Oddly, this wasn't triggered on project creation, where my IPv4 address was logged instead of my IPv6 one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/middleware/proxy.py`
Content:
```
1 from __future__ import absolute_import
2
3
4 class SetRemoteAddrFromForwardedFor(object):
5 def process_request(self, request):
6 try:
7 real_ip = request.META['HTTP_X_FORWARDED_FOR']
8 except KeyError:
9 pass
10 else:
11 # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
12 # Take just the first one.
13 real_ip = real_ip.split(",")[0]
14 if ':' in real_ip:
15 real_ip = real_ip.split(':', 1)[0]
16 request.META['REMOTE_ADDR'] = real_ip
17
18
19 class ContentLengthHeaderMiddleware(object):
20 """
21 Ensure that we have a proper Content-Length/Transfer-Encoding header
22 """
23
24 def process_response(self, request, response):
25 if 'Transfer-Encoding' in response or 'Content-Length' in response:
26 return response
27
28 if not response.streaming:
29 response['Content-Length'] = str(len(response.content))
30
31 return response
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py
--- a/src/sentry/middleware/proxy.py
+++ b/src/sentry/middleware/proxy.py
@@ -11,7 +11,8 @@
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# Take just the first one.
real_ip = real_ip.split(",")[0]
- if ':' in real_ip:
+ if ':' in real_ip and '.' in real_ip:
+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.
real_ip = real_ip.split(':', 1)[0]
request.META['REMOTE_ADDR'] = real_ip
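
A self-contained sketch of the patched logic, showing why requiring both `:` and `.` protects bare IPv6 addresses; note it would still mangle IPv4-mapped IPv6 forms such as `::ffff:203.0.113.7`:

```python
def parse_forwarded_for(header: str) -> str:
    # Mirror of the patched middleware branch, extracted for testing.
    real_ip = header.split(",")[0]
    if ":" in real_ip and "." in real_ip:
        # Treated as IPv4 plus a port, e.g. "203.0.113.7:8080".
        real_ip = real_ip.split(":", 1)[0]
    return real_ip

assert parse_forwarded_for("203.0.113.7:8080, 10.0.0.1") == "203.0.113.7"
assert parse_forwarded_for("2001:db8::1") == "2001:db8::1"  # now left intact
```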
|
{"golden_diff": "diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py\n--- a/src/sentry/middleware/proxy.py\n+++ b/src/sentry/middleware/proxy.py\n@@ -11,7 +11,8 @@\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n- if ':' in real_ip:\n+ if ':' in real_ip and '.' in real_ip:\n+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n", "issue": "Audit log crash on IPv6\nAffected version: 8.5.0\n\nI was just saving some preferences and faced this error:\n\n```\nDataError: ERREUR: syntaxe en entr\u00e9e invalide pour le type inet : \u00ab 2001 \u00bb\nLINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...\n ^\n\nSQL: INSERT INTO \"sentry_auditlogentry\" (\"organization_id\", \"actor_label\", \"actor_id\", \"actor_key_id\", \"target_object\", \"target_user_id\", \"event\", \"ip_address\", \"data\", \"datetime\") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING \"sentry_auditlogentry\".\"id\"\n```\n\nLooks like IPv6 addresses are not handled properly.\n\nAlso reproduced by deleting a project.\nOddly this wasn't triggered on project creation and my IPv4 was logged instead of my IPv6.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\n\nclass SetRemoteAddrFromForwardedFor(object):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n if ':' in real_ip:\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\nclass ContentLengthHeaderMiddleware(object):\n \"\"\"\n Ensure that we have a proper Content-Length/Transfer-Encoding header\n \"\"\"\n\n def process_response(self, request, response):\n if 'Transfer-Encoding' in response or 'Content-Length' in response:\n return response\n\n if not response.streaming:\n response['Content-Length'] = str(len(response.content))\n\n return response\n", "path": "src/sentry/middleware/proxy.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\n\nclass SetRemoteAddrFromForwardedFor(object):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n if ':' in real_ip and '.' in real_ip:\n # Strip the port number off of an IPv4 FORWARDED_FOR entry.\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\nclass ContentLengthHeaderMiddleware(object):\n \"\"\"\n Ensure that we have a proper Content-Length/Transfer-Encoding header\n \"\"\"\n\n def process_response(self, request, response):\n if 'Transfer-Encoding' in response or 'Content-Length' in response:\n return response\n\n if not response.streaming:\n response['Content-Length'] = str(len(response.content))\n\n return response\n", "path": "src/sentry/middleware/proxy.py"}]}
| 750 | 154 |
gh_patches_debug_36367
|
rasdani/github-patches
|
git_diff
|
searx__searx-335
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flickr engine is broken
The HTML seems to have changed, but there appears to be a [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1).
Among the parameters there is an api_key; I don't know how long it stays valid or under which conditions.
The call to this URL is triggered inside another minified JavaScript file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/flickr_noapi.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Flickr (Images)
5
6 @website https://www.flickr.com
7 @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
8
9 @using-api no
10 @results HTML
11 @stable no
12 @parse url, title, thumbnail, img_src
13 """
14
15 from urllib import urlencode
16 from json import loads
17 import re
18 from searx.engines import logger
19
20
21 logger = logger.getChild('flickr-noapi')
22
23 categories = ['images']
24
25 url = 'https://www.flickr.com/'
26 search_url = url + 'search?{query}&page={page}'
27 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
28 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
29 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
30
31 paging = True
32
33
34 def build_flickr_url(user_id, photo_id):
35 return photo_url.format(userid=user_id, photoid=photo_id)
36
37
38 def request(query, params):
39 params['url'] = search_url.format(query=urlencode({'text': query}),
40 page=params['pageno'])
41 return params
42
43
44 def response(resp):
45 results = []
46
47 matches = regex.search(resp.text)
48
49 if matches is None:
50 return results
51
52 match = matches.group(1)
53 search_results = loads(match)
54
55 if '_data' not in search_results:
56 return []
57
58 photos = search_results['_data']
59
60 for photo in photos:
61
62 # In paged configuration, the first pages' photos
63 # are represented by a None object
64 if photo is None:
65 continue
66
67 img_src = None
68 # From the biggest to the lowest format
69 for image_size in image_sizes:
70 if image_size in photo['sizes']:
71 img_src = photo['sizes'][image_size]['url']
72 break
73
74 if not img_src:
75 logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
76 continue
77
78 if 'id' not in photo['owner']:
79 continue
80
81 # For a bigger thumbnail, keep only the url_z, not the url_n
82 if 'n' in photo['sizes']:
83 thumbnail_src = photo['sizes']['n']['url']
84 elif 'z' in photo['sizes']:
85 thumbnail_src = photo['sizes']['z']['url']
86 else:
87 thumbnail_src = img_src
88
89 url = build_flickr_url(photo['owner']['id'], photo['id'])
90
91 title = photo.get('title', '')
92
93 content = '<span class="photo-author">' +\
94 photo['owner']['username'] +\
95 '</span><br />'
96
97 if 'description' in photo:
98 content = content +\
99 '<span class="description">' +\
100 photo['description'] +\
101 '</span>'
102
103 # append result
104 results.append({'url': url,
105 'title': title,
106 'img_src': img_src,
107 'thumbnail_src': thumbnail_src,
108 'content': content,
109 'template': 'images.html'})
110
111 return results
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -25,7 +25,7 @@
url = 'https://www.flickr.com/'
search_url = url + 'search?{query}&page={page}'
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
-regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+regex = re.compile(r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
paging = True
@@ -38,6 +38,7 @@
def request(query, params):
params['url'] = search_url.format(query=urlencode({'text': query}),
page=params['pageno'])
+
return params
@@ -75,10 +76,10 @@
logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
continue
- if 'id' not in photo['owner']:
+ if 'ownerNsid' not in photo:
continue
-# For a bigger thumbnail, keep only the url_z, not the url_n
+ # For a bigger thumbnail, keep only the url_z, not the url_n
if 'n' in photo['sizes']:
thumbnail_src = photo['sizes']['n']['url']
elif 'z' in photo['sizes']:
@@ -86,20 +87,14 @@
else:
thumbnail_src = img_src
- url = build_flickr_url(photo['owner']['id'], photo['id'])
+ url = build_flickr_url(photo['ownerNsid'], photo['id'])
title = photo.get('title', '')
content = '<span class="photo-author">' +\
- photo['owner']['username'] +\
+ photo['username'] +\
'</span><br />'
- if 'description' in photo:
- content = content +\
- '<span class="description">' +\
- photo['description'] +\
- '</span>'
-
# append result
results.append({'url': url,
'title': title,
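
A standalone sketch of the updated extraction; the page fragment below is fabricated, while the model name and the top-level `ownerNsid`/`username` keys follow the diff:

```python
import json
import re

regex = re.compile(
    r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL
)

# Fabricated fragment mimicking the JSON Flickr embeds in its search page.
page = (
    '"search-photos-lite-models","photos":{"_data":[{"id":"1",'
    '"ownerNsid":"42@N00","username":"alice","title":"demo","sizes":{}}]},'
    '"totalItems": 1'
)

photos = json.loads(regex.search(page).group(1))["_data"]
assert photos[0]["ownerNsid"] == "42@N00" and photos[0]["username"] == "alice"
```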
|
{"golden_diff": "diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py\n--- a/searx/engines/flickr_noapi.py\n+++ b/searx/engines/flickr_noapi.py\n@@ -25,7 +25,7 @@\n url = 'https://www.flickr.com/'\n search_url = url + 'search?{query}&page={page}'\n photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\n-regex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n+regex = re.compile(r\"\\\"search-photos-lite-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n \n paging = True\n@@ -38,6 +38,7 @@\n def request(query, params):\n params['url'] = search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n+\n return params\n \n \n@@ -75,10 +76,10 @@\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n \n- if 'id' not in photo['owner']:\n+ if 'ownerNsid' not in photo:\n continue\n \n-# For a bigger thumbnail, keep only the url_z, not the url_n\n+ # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n@@ -86,20 +87,14 @@\n else:\n thumbnail_src = img_src\n \n- url = build_flickr_url(photo['owner']['id'], photo['id'])\n+ url = build_flickr_url(photo['ownerNsid'], photo['id'])\n \n title = photo.get('title', '')\n \n content = '<span class=\"photo-author\">' +\\\n- photo['owner']['username'] +\\\n+ photo['username'] +\\\n '</span><br />'\n \n- if 'description' in photo:\n- content = content +\\\n- '<span class=\"description\">' +\\\n- photo['description'] +\\\n- '</span>'\n-\n # append result\n results.append({'url': url,\n 'title': title,\n", "issue": "Flickr engine is broken\nThe html seems to have changed, but it's seems there is [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1)\n\nIn all parameters there is an api_key : I don't know how long it is valid, in which condition.\nThe call to this URL is trigger inside another minified javascript.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom urllib import urlencode\nfrom json import loads\nimport re\nfrom searx.engines import logger\n\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nregex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef request(query, params):\n params['url'] = 
search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n return params\n\n\ndef response(resp):\n results = []\n\n matches = regex.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n search_results = loads(match)\n\n if '_data' not in search_results:\n return []\n\n photos = search_results['_data']\n\n for photo in photos:\n\n # In paged configuration, the first pages' photos\n # are represented by a None object\n if photo is None:\n continue\n\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n if 'id' not in photo['owner']:\n continue\n\n# For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n url = build_flickr_url(photo['owner']['id'], photo['id'])\n\n title = photo.get('title', '')\n\n content = '<span class=\"photo-author\">' +\\\n photo['owner']['username'] +\\\n '</span><br />'\n\n if 'description' in photo:\n content = content +\\\n '<span class=\"description\">' +\\\n photo['description'] +\\\n '</span>'\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'content': content,\n 'template': 'images.html'})\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom urllib import urlencode\nfrom json import loads\nimport re\nfrom searx.engines import logger\n\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nregex = re.compile(r\"\\\"search-photos-lite-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n\n return params\n\n\ndef response(resp):\n results = []\n\n matches = regex.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n search_results = loads(match)\n\n if '_data' not in search_results:\n return []\n\n photos = search_results['_data']\n\n for photo in photos:\n\n # In paged configuration, the first pages' photos\n # are represented by a None object\n if photo is None:\n continue\n\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n if 'ownerNsid' not in photo:\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in 
photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n title = photo.get('title', '')\n\n content = '<span class=\"photo-author\">' +\\\n photo['username'] +\\\n '</span><br />'\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'content': content,\n 'template': 'images.html'})\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}]}
| 1,483 | 557 |
gh_patches_debug_35825
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-2174
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more details to Callback doc
(from zulip chat)
> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/experimental/callback.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8
9 logger = logging.getLogger(__name__)
10
11
12 class Callback:
13 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
14 """
15 Called in RUN mode before job starts.
16 """
17 ...
18
19 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
20 """
21 Called in RUN mode after job ends.
22 """
23 ...
24
25 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
26 """
27 Called in MULTIRUN mode before any job starts.
28 """
29 ...
30
31 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
32 """
33 Called in MULTIRUN mode after all jobs end.
34 """
35 ...
36
37 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
38 """
39 Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
40 application code).
41 """
42 ...
43
44 def on_job_end(
45 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
46 ) -> None:
47 """
48 Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
49 application code).
50 """
51 ...
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py
--- a/hydra/experimental/callback.py
+++ b/hydra/experimental/callback.py
@@ -12,32 +12,37 @@
class Callback:
def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode before job starts.
+ Called in RUN mode before job/application code starts. `config` is composed with overrides.
+ Some `hydra.runtime` configs are not populated yet.
+ See hydra.core.utils.run_job for more info.
"""
...
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode after job ends.
+ Called in RUN mode after job/application code returns.
"""
...
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
Called in MULTIRUN mode before any job starts.
+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.
"""
...
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in MULTIRUN mode after all jobs end.
+ Called in MULTIRUN mode after all jobs returns.
+ When using a launcher, this will be executed on local machine.
"""
...
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
- application code).
+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server along with your application code.
"""
...
@@ -47,5 +52,10 @@
"""
Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server after your application code.
+
+ `job_return` contains info that could be useful for logging or post-processing.
+ See hydra.core.utils.JobReturn for more.
"""
...
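
A hedged sketch of a user-side callback matching the hook signatures documented above; the exact `JobReturn` attribute set varies by Hydra version:

```python
import logging
from typing import Any

from omegaconf import DictConfig

from hydra.core.utils import JobReturn
from hydra.experimental.callback import Callback

log = logging.getLogger(__name__)


class LoggingCallback(Callback):
    def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
        # Runs inside hydra.core.utils.run_job, i.e. on the (possibly
        # remote) worker, right before the application code.
        log.info("job starting")

    def on_job_end(self, config: DictConfig, job_return: JobReturn, **kwargs: Any) -> None:
        # Useful for logging or post-processing; task_name is one of the
        # JobReturn fields, but treat the full set as version-dependent.
        log.info("job finished: %s", job_return.task_name)
```

Such a callback is typically registered under the `hydra.callbacks` config node with a `_target_` pointing at the class; consult the docs for your Hydra version.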
|
{"golden_diff": "diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py\n--- a/hydra/experimental/callback.py\n+++ b/hydra/experimental/callback.py\n@@ -12,32 +12,37 @@\n class Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode before job starts.\n+ Called in RUN mode before job/application code starts. `config` is composed with overrides.\n+ Some `hydra.runtime` configs are not populated yet.\n+ See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode after job ends.\n+ Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in MULTIRUN mode after all jobs end.\n+ Called in MULTIRUN mode after all jobs returns.\n+ When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n- application code).\n+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server along with your application code.\n \"\"\"\n ...\n \n@@ -47,5 +52,10 @@\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server after your application code.\n+\n+ `job_return` contains info that could be useful for logging or post-processing.\n+ See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "issue": "Add more details to Callback doc\n(from zulip chat)\r\n> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job starts.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job ends.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs end.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n application code).\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job/application code starts. `config` is composed with overrides.\n Some `hydra.runtime` configs are not populated yet.\n See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs returns.\n When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n on the remote server along with your application code.\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n on the remote server after your application code.\n\n `job_return` contains info that could be useful for logging or post-processing.\n See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}]}
| 738 | 552 |
gh_patches_debug_29649
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4897
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances
**Describe the issue**
The CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.
**Examples**
For a PostgreSQL RDS instance with the following Terraform configuration:
**main.tf**
```
resource "aws_rds_cluster" "main" {
# ... other configurations ...
enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports
# ... other configurations ...
}
```
**variables.tf**
```
variable "enabled_cloudwatch_logs_exports" {
default = [
"postgresql"
]
}
```
The CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.
**Expected outcome**
The CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.
**Version**
**Checkov Version:** Latest
**Additional context**
This issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.
https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8
9 class RDSClusterAuditLogging(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,
13 NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),
14 NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),
15 NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
16 Database logging should be enabled
17 """
18 name = "Ensure that RDS Cluster audit logging is enabled"
19 id = "CKV_AWS_325"
20 supported_resources = ("aws_rds_cluster",)
21 categories = (CheckCategories.LOGGING,)
22 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
23
24 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
25 logs_exports = conf.get("enabled_cloudwatch_logs_exports")
26 if (
27 logs_exports
28 and isinstance(logs_exports, list)
29 and isinstance(logs_exports[0], list)
30 and "audit" in logs_exports[0]
31 ):
32 return CheckResult.PASSED
33
34 return CheckResult.FAILED
35
36
37 check = RDSClusterAuditLogging()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
@@ -5,6 +5,12 @@
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+SUPPORTED_ENGINES = {
+ "aurora",
+ "aurora-mysql",
+ "mysql"
+}
+
class RDSClusterAuditLogging(BaseResourceCheck):
def __init__(self) -> None:
@@ -15,13 +21,18 @@
NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
Database logging should be enabled
"""
- name = "Ensure that RDS Cluster audit logging is enabled"
+ name = "Ensure that RDS Cluster audit logging is enabled for MySQL engine"
id = "CKV_AWS_325"
supported_resources = ("aws_rds_cluster",)
categories = (CheckCategories.LOGGING,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ engine = conf.get("engine")
+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:
+ # only MySQL cluster support easy audit logging export
+ return CheckResult.UNKNOWN
+
logs_exports = conf.get("enabled_cloudwatch_logs_exports")
if (
logs_exports
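
A standalone re-implementation of the patched decision logic, with the check results spelled as strings for illustration; conf values are lists because that is how checkov hands over parsed HCL:

```python
SUPPORTED_ENGINES = {"aurora", "aurora-mysql", "mysql"}

def scan(conf: dict) -> str:
    engine = conf.get("engine")
    if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:
        return "UNKNOWN"  # e.g. aurora-postgresql clusters are now skipped
    logs = conf.get("enabled_cloudwatch_logs_exports")
    if logs and isinstance(logs, list) and isinstance(logs[0], list) and "audit" in logs[0]:
        return "PASSED"
    return "FAILED"

assert scan({"engine": ["aurora-postgresql"],
             "enabled_cloudwatch_logs_exports": [["postgresql"]]}) == "UNKNOWN"
assert scan({"engine": ["aurora-mysql"],
             "enabled_cloudwatch_logs_exports": [["audit"]]}) == "PASSED"
```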
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n@@ -5,6 +5,12 @@\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n+SUPPORTED_ENGINES = {\n+ \"aurora\",\n+ \"aurora-mysql\",\n+ \"mysql\"\n+}\n+\n \n class RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n@@ -15,13 +21,18 @@\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n- name = \"Ensure that RDS Cluster audit logging is enabled\"\n+ name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ engine = conf.get(\"engine\")\n+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n+ # only MySQL cluster support easy audit logging export\n+ return CheckResult.UNKNOWN\n+\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n", "issue": "CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances\n**Describe the issue**\r\nThe CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.\r\n\r\n**Examples**\r\nFor a PostgreSQL RDS instance with the following Terraform configuration:\r\n\r\n**main.tf**\r\n\r\n```\r\nresource \"aws_rds_cluster\" \"main\" {\r\n # ... other configurations ...\r\n enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports\r\n # ... other configurations ...\r\n}\r\n```\r\n\r\n**variables.tf**\r\n\r\n```\r\nvariable \"enabled_cloudwatch_logs_exports\" {\r\n default = [\r\n \"postgresql\"\r\n ]\r\n}\r\n```\r\n\r\nThe CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.\r\n\r\n**Expected outcome**\r\nThe CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.\r\n\r\n**Version**\r\n\r\n**Checkov Version:** Lastest\r\n\r\n**Additional context**\r\nThis issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. 
The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nSUPPORTED_ENGINES = {\n \"aurora\",\n \"aurora-mysql\",\n \"mysql\"\n}\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n engine = conf.get(\"engine\")\n if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n # only MySQL cluster support easy audit logging export\n return CheckResult.UNKNOWN\n\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}]}
| 1,158 | 405 |
gh_patches_debug_7747
|
rasdani/github-patches
|
git_diff
|
Cog-Creators__Red-DiscordBot-1981
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[V3 ModLog] Error in command "case"
# Command bugs
#### Command name
[p]case
#### What cog is this command from?
ModLog
#### What were you expecting to happen?
To show me information about a case.
#### What actually happened?
I get this error:

#### How can we reproduce this issue?
1. Have a modlog with cases.
2. Do [p]case
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/modlog/modlog.py`
Content:
```
1 import discord
2
3 from redbot.core import checks, modlog, commands
4 from redbot.core.bot import Red
5 from redbot.core.i18n import Translator, cog_i18n
6 from redbot.core.utils.chat_formatting import box
7
8 _ = Translator("ModLog", __file__)
9
10
11 @cog_i18n(_)
12 class ModLog:
13 """Log for mod actions"""
14
15 def __init__(self, bot: Red):
16 self.bot = bot
17
18 @commands.group()
19 @checks.guildowner_or_permissions(administrator=True)
20 async def modlogset(self, ctx: commands.Context):
21 """Settings for the mod log"""
22 pass
23
24 @modlogset.command()
25 @commands.guild_only()
26 async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):
27 """Sets a channel as mod log
28
29 Leaving the channel parameter empty will deactivate it"""
30 guild = ctx.guild
31 if channel:
32 if channel.permissions_for(guild.me).send_messages:
33 await modlog.set_modlog_channel(guild, channel)
34 await ctx.send(_("Mod events will be sent to {}").format(channel.mention))
35 else:
36 await ctx.send(
37 _("I do not have permissions to send messages in {}!").format(channel.mention)
38 )
39 else:
40 try:
41 await modlog.get_modlog_channel(guild)
42 except RuntimeError:
43 await ctx.send_help()
44 else:
45 await modlog.set_modlog_channel(guild, None)
46 await ctx.send(_("Mod log deactivated."))
47
48 @modlogset.command(name="cases")
49 @commands.guild_only()
50 async def set_cases(self, ctx: commands.Context, action: str = None):
51 """Enables or disables case creation for each type of mod action"""
52 guild = ctx.guild
53
54 if action is None: # No args given
55 casetypes = await modlog.get_all_casetypes(guild)
56 await ctx.send_help()
57 title = _("Current settings:")
58 msg = ""
59 for ct in casetypes:
60 enabled = await ct.is_enabled()
61 value = "enabled" if enabled else "disabled"
62 msg += "%s : %s\n" % (ct.name, value)
63
64 msg = title + "\n" + box(msg)
65 await ctx.send(msg)
66 return
67 casetype = await modlog.get_casetype(action, guild)
68 if not casetype:
69 await ctx.send(_("That action is not registered"))
70 else:
71
72 enabled = await casetype.is_enabled()
73 await casetype.set_enabled(True if not enabled else False)
74
75 msg = _("Case creation for {} actions is now {}.").format(
76 action, "enabled" if not enabled else "disabled"
77 )
78 await ctx.send(msg)
79
80 @modlogset.command()
81 @commands.guild_only()
82 async def resetcases(self, ctx: commands.Context):
83 """Resets modlog's cases"""
84 guild = ctx.guild
85 await modlog.reset_cases(guild)
86 await ctx.send(_("Cases have been reset."))
87
88 @commands.command()
89 @commands.guild_only()
90 async def case(self, ctx: commands.Context, number: int):
91 """Shows the specified case"""
92 try:
93 case = await modlog.get_case(number, ctx.guild, self.bot)
94 except RuntimeError:
95 await ctx.send(_("That case does not exist for that server"))
96 return
97 else:
98 await ctx.send(embed=await case.get_case_msg_content())
99
100 @commands.command(usage="[case] <reason>")
101 @commands.guild_only()
102 async def reason(self, ctx: commands.Context, *, reason: str):
103 """Lets you specify a reason for mod-log's cases
104
105 Please note that you can only edit cases you are
106 the owner of unless you are a mod/admin or the server owner.
107
108 If no number is specified, the latest case will be used."""
109 author = ctx.author
110 guild = ctx.guild
111 potential_case = reason.split()[0]
112 if potential_case.isdigit():
113 case = int(potential_case)
114 reason = reason.replace(potential_case, "")
115 else:
116 case = str(int(await modlog.get_next_case_number(guild)) - 1)
117 # latest case
118 try:
119 case_before = await modlog.get_case(case, guild, self.bot)
120 except RuntimeError:
121 await ctx.send(_("That case does not exist!"))
122 return
123 else:
124 if case_before.moderator is None:
125 # No mod set, so attempt to find out if the author
126 # triggered the case creation with an action
127 bot_perms = guild.me.guild_permissions
128 if bot_perms.view_audit_log:
129 case_type = await modlog.get_casetype(case_before.action_type, guild)
130 if case_type is not None and case_type.audit_type is not None:
131 audit_type = getattr(discord.AuditLogAction, case_type.audit_type)
132 if audit_type:
133 audit_case = None
134 async for entry in guild.audit_logs(action=audit_type):
135 if (
136 entry.target.id == case_before.user.id
137 and entry.action == audit_type
138 ):
139 audit_case = entry
140 break
141 if audit_case:
142 case_before.moderator = audit_case.user
143 is_guild_owner = author == guild.owner
144 is_case_author = author == case_before.moderator
145 author_is_mod = await ctx.bot.is_mod(author)
146 if not (is_guild_owner or is_case_author or author_is_mod):
147 await ctx.send(_("You are not authorized to modify that case!"))
148 return
149 to_modify = {"reason": reason}
150 if case_before.moderator != author:
151 to_modify["amended_by"] = author
152 to_modify["modified_at"] = ctx.message.created_at.timestamp()
153 await case_before.edit(to_modify)
154 await ctx.send(_("Reason has been updated."))
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py
--- a/redbot/cogs/modlog/modlog.py
+++ b/redbot/cogs/modlog/modlog.py
@@ -95,7 +95,10 @@
await ctx.send(_("That case does not exist for that server"))
return
else:
- await ctx.send(embed=await case.get_case_msg_content())
+ if await ctx.embed_requested():
+ await ctx.send(embed=await case.message_content(embed=True))
+ else:
+ await ctx.send(await case.message_content(embed=False))
@commands.command(usage="[case] <reason>")
@commands.guild_only()
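
A minimal sketch of the pattern the patch introduces: branch on whether embeds are allowed, then render the case either way. `ctx.embed_requested()` and `Case.message_content(embed=...)` are the Red APIs named in the diff; the rest is scaffolding:

```python
async def send_case(ctx, case):
    # Honour per-guild/per-user embed settings instead of always embedding.
    if await ctx.embed_requested():
        await ctx.send(embed=await case.message_content(embed=True))
    else:
        await ctx.send(await case.message_content(embed=False))
```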
|
{"golden_diff": "diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py\n--- a/redbot/cogs/modlog/modlog.py\n+++ b/redbot/cogs/modlog/modlog.py\n@@ -95,7 +95,10 @@\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n- await ctx.send(embed=await case.get_case_msg_content())\n+ if await ctx.embed_requested():\n+ await ctx.send(embed=await case.message_content(embed=True))\n+ else:\n+ await ctx.send(await case.message_content(embed=False))\n \n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n", "issue": "[V3 ModLog] Error in command \"case\"\n# Command bugs\r\n\r\n\r\n#### Command name\r\n\r\n[p]case\r\n\r\n#### What cog is this command from?\r\n\r\nModLog\r\n\r\n#### What were you expecting to happen?\r\n\r\nTo show me information about a case.\r\n\r\n\r\n#### What actually happened?\r\nI get this error:\r\n\r\n\r\n\r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Have a modlog with cases.\r\n2. Do [p]case\r\n\n", "before_files": [{"content": "import discord\n\nfrom redbot.core import checks, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import box\n\n_ = Translator(\"ModLog\", __file__)\n\n\n@cog_i18n(_)\nclass ModLog:\n \"\"\"Log for mod actions\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.guildowner_or_permissions(administrator=True)\n async def modlogset(self, ctx: commands.Context):\n \"\"\"Settings for the mod log\"\"\"\n pass\n\n @modlogset.command()\n @commands.guild_only()\n async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):\n \"\"\"Sets a channel as mod log\n\n Leaving the channel parameter empty will deactivate it\"\"\"\n guild = ctx.guild\n if channel:\n if channel.permissions_for(guild.me).send_messages:\n await modlog.set_modlog_channel(guild, channel)\n await ctx.send(_(\"Mod events will be sent to {}\").format(channel.mention))\n else:\n await ctx.send(\n _(\"I do not have permissions to send messages in {}!\").format(channel.mention)\n )\n else:\n try:\n await modlog.get_modlog_channel(guild)\n except RuntimeError:\n await ctx.send_help()\n else:\n await modlog.set_modlog_channel(guild, None)\n await ctx.send(_(\"Mod log deactivated.\"))\n\n @modlogset.command(name=\"cases\")\n @commands.guild_only()\n async def set_cases(self, ctx: commands.Context, action: str = None):\n \"\"\"Enables or disables case creation for each type of mod action\"\"\"\n guild = ctx.guild\n\n if action is None: # No args given\n casetypes = await modlog.get_all_casetypes(guild)\n await ctx.send_help()\n title = _(\"Current settings:\")\n msg = \"\"\n for ct in casetypes:\n enabled = await ct.is_enabled()\n value = \"enabled\" if enabled else \"disabled\"\n msg += \"%s : %s\\n\" % (ct.name, value)\n\n msg = title + \"\\n\" + box(msg)\n await ctx.send(msg)\n return\n casetype = await modlog.get_casetype(action, guild)\n if not casetype:\n await ctx.send(_(\"That action is not registered\"))\n else:\n\n enabled = await casetype.is_enabled()\n await casetype.set_enabled(True if not enabled else False)\n\n msg = _(\"Case creation for {} actions is now {}.\").format(\n action, \"enabled\" if not enabled else \"disabled\"\n )\n await ctx.send(msg)\n\n @modlogset.command()\n @commands.guild_only()\n async def resetcases(self, ctx: commands.Context):\n \"\"\"Resets modlog's cases\"\"\"\n guild = ctx.guild\n await modlog.reset_cases(guild)\n await 
ctx.send(_(\"Cases have been reset.\"))\n\n @commands.command()\n @commands.guild_only()\n async def case(self, ctx: commands.Context, number: int):\n \"\"\"Shows the specified case\"\"\"\n try:\n case = await modlog.get_case(number, ctx.guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n await ctx.send(embed=await case.get_case_msg_content())\n\n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n async def reason(self, ctx: commands.Context, *, reason: str):\n \"\"\"Lets you specify a reason for mod-log's cases\n \n Please note that you can only edit cases you are\n the owner of unless you are a mod/admin or the server owner.\n \n If no number is specified, the latest case will be used.\"\"\"\n author = ctx.author\n guild = ctx.guild\n potential_case = reason.split()[0]\n if potential_case.isdigit():\n case = int(potential_case)\n reason = reason.replace(potential_case, \"\")\n else:\n case = str(int(await modlog.get_next_case_number(guild)) - 1)\n # latest case\n try:\n case_before = await modlog.get_case(case, guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist!\"))\n return\n else:\n if case_before.moderator is None:\n # No mod set, so attempt to find out if the author\n # triggered the case creation with an action\n bot_perms = guild.me.guild_permissions\n if bot_perms.view_audit_log:\n case_type = await modlog.get_casetype(case_before.action_type, guild)\n if case_type is not None and case_type.audit_type is not None:\n audit_type = getattr(discord.AuditLogAction, case_type.audit_type)\n if audit_type:\n audit_case = None\n async for entry in guild.audit_logs(action=audit_type):\n if (\n entry.target.id == case_before.user.id\n and entry.action == audit_type\n ):\n audit_case = entry\n break\n if audit_case:\n case_before.moderator = audit_case.user\n is_guild_owner = author == guild.owner\n is_case_author = author == case_before.moderator\n author_is_mod = await ctx.bot.is_mod(author)\n if not (is_guild_owner or is_case_author or author_is_mod):\n await ctx.send(_(\"You are not authorized to modify that case!\"))\n return\n to_modify = {\"reason\": reason}\n if case_before.moderator != author:\n to_modify[\"amended_by\"] = author\n to_modify[\"modified_at\"] = ctx.message.created_at.timestamp()\n await case_before.edit(to_modify)\n await ctx.send(_(\"Reason has been updated.\"))\n", "path": "redbot/cogs/modlog/modlog.py"}], "after_files": [{"content": "import discord\n\nfrom redbot.core import checks, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import box\n\n_ = Translator(\"ModLog\", __file__)\n\n\n@cog_i18n(_)\nclass ModLog:\n \"\"\"Log for mod actions\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.guildowner_or_permissions(administrator=True)\n async def modlogset(self, ctx: commands.Context):\n \"\"\"Settings for the mod log\"\"\"\n pass\n\n @modlogset.command()\n @commands.guild_only()\n async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):\n \"\"\"Sets a channel as mod log\n\n Leaving the channel parameter empty will deactivate it\"\"\"\n guild = ctx.guild\n if channel:\n if channel.permissions_for(guild.me).send_messages:\n await modlog.set_modlog_channel(guild, channel)\n await ctx.send(_(\"Mod events will be sent to {}\").format(channel.mention))\n else:\n await ctx.send(\n _(\"I do 
not have permissions to send messages in {}!\").format(channel.mention)\n )\n else:\n try:\n await modlog.get_modlog_channel(guild)\n except RuntimeError:\n await ctx.send_help()\n else:\n await modlog.set_modlog_channel(guild, None)\n await ctx.send(_(\"Mod log deactivated.\"))\n\n @modlogset.command(name=\"cases\")\n @commands.guild_only()\n async def set_cases(self, ctx: commands.Context, action: str = None):\n \"\"\"Enables or disables case creation for each type of mod action\"\"\"\n guild = ctx.guild\n\n if action is None: # No args given\n casetypes = await modlog.get_all_casetypes(guild)\n await ctx.send_help()\n title = _(\"Current settings:\")\n msg = \"\"\n for ct in casetypes:\n enabled = await ct.is_enabled()\n value = \"enabled\" if enabled else \"disabled\"\n msg += \"%s : %s\\n\" % (ct.name, value)\n\n msg = title + \"\\n\" + box(msg)\n await ctx.send(msg)\n return\n casetype = await modlog.get_casetype(action, guild)\n if not casetype:\n await ctx.send(_(\"That action is not registered\"))\n else:\n\n enabled = await casetype.is_enabled()\n await casetype.set_enabled(True if not enabled else False)\n\n msg = _(\"Case creation for {} actions is now {}.\").format(\n action, \"enabled\" if not enabled else \"disabled\"\n )\n await ctx.send(msg)\n\n @modlogset.command()\n @commands.guild_only()\n async def resetcases(self, ctx: commands.Context):\n \"\"\"Resets modlog's cases\"\"\"\n guild = ctx.guild\n await modlog.reset_cases(guild)\n await ctx.send(_(\"Cases have been reset.\"))\n\n @commands.command()\n @commands.guild_only()\n async def case(self, ctx: commands.Context, number: int):\n \"\"\"Shows the specified case\"\"\"\n try:\n case = await modlog.get_case(number, ctx.guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n if await ctx.embed_requested():\n await ctx.send(embed=await case.message_content(embed=True))\n else:\n await ctx.send(await case.message_content(embed=False))\n\n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n async def reason(self, ctx: commands.Context, *, reason: str):\n \"\"\"Lets you specify a reason for mod-log's cases\n \n Please note that you can only edit cases you are\n the owner of unless you are a mod/admin or the server owner.\n \n If no number is specified, the latest case will be used.\"\"\"\n author = ctx.author\n guild = ctx.guild\n potential_case = reason.split()[0]\n if potential_case.isdigit():\n case = int(potential_case)\n reason = reason.replace(potential_case, \"\")\n else:\n case = str(int(await modlog.get_next_case_number(guild)) - 1)\n # latest case\n try:\n case_before = await modlog.get_case(case, guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist!\"))\n return\n else:\n if case_before.moderator is None:\n # No mod set, so attempt to find out if the author\n # triggered the case creation with an action\n bot_perms = guild.me.guild_permissions\n if bot_perms.view_audit_log:\n case_type = await modlog.get_casetype(case_before.action_type, guild)\n if case_type is not None and case_type.audit_type is not None:\n audit_type = getattr(discord.AuditLogAction, case_type.audit_type)\n if audit_type:\n audit_case = None\n async for entry in guild.audit_logs(action=audit_type):\n if (\n entry.target.id == case_before.user.id\n and entry.action == audit_type\n ):\n audit_case = entry\n break\n if audit_case:\n case_before.moderator = audit_case.user\n is_guild_owner = author == guild.owner\n 
is_case_author = author == case_before.moderator\n author_is_mod = await ctx.bot.is_mod(author)\n if not (is_guild_owner or is_case_author or author_is_mod):\n await ctx.send(_(\"You are not authorized to modify that case!\"))\n return\n to_modify = {\"reason\": reason}\n if case_before.moderator != author:\n to_modify[\"amended_by\"] = author\n to_modify[\"modified_at\"] = ctx.message.created_at.timestamp()\n await case_before.edit(to_modify)\n await ctx.send(_(\"Reason has been updated.\"))\n", "path": "redbot/cogs/modlog/modlog.py"}]}
| 2,036 | 152 |
gh_patches_debug_9538
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-481
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't run in Travis CI xenial
I was testing the new Travis CI image:
https://blog.travis-ci.com/2018-11-08-xenial-release
https://docs.travis-ci.com/user/reference/xenial/
My install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml`, which gave this error:
```
Traceback (most recent call last):
File "/home/travis/.local/bin/cfn-lint", line 7, in <module>
from cfnlint.__main__ import main
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py", line 19, in <module>
import cfnlint.core
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py", line 24, in <module>
import cfnlint.maintenance
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py", line 19, in <module>
import requests
File "/usr/lib/python2.7/dist-packages/requests/__init__.py", line 58, in <module>
from . import utils
File "/usr/lib/python2.7/dist-packages/requests/utils.py", line 25, in <module>
from . import certs
ImportError: cannot import name certs
```
I "fixed" this problem by just changing to Python3 in my build script:
```
pyenv global 3.7
pip3 install --user --upgrade cfn-lint
cfn-lint stack.cfn.yaml
```
But there might be some other lingering issue here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import codecs
18 import re
19 from setuptools import find_packages
20 from setuptools import setup
21
22
23 def get_version(filename):
24 with codecs.open(filename, 'r', 'utf-8') as fp:
25 contents = fp.read()
26 return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents).group(1)
27
28
29 version = get_version('src/cfnlint/version.py')
30
31
32 with open('README.md') as f:
33 readme = f.read()
34
35 setup(
36 name='cfn-lint',
37 version=version,
38 description=('checks cloudformation for practices and behaviour \
39 that could potentially be improved'),
40 long_description=readme,
41 long_description_content_type="text/markdown",
42 keywords='aws, lint',
43 author='kddejong',
44 author_email='[email protected]',
45 url='https://github.com/awslabs/cfn-python-lint',
46 package_dir={'': 'src'},
47 package_data={'cfnlint': [
48 'data/CloudSpecs/*.json',
49 'data/AdditionalSpecs/*.json',
50 'data/Serverless/*.json',
51 'data/CfnLintCli/config/schema.json'
52 ]},
53 packages=find_packages('src'),
54 zip_safe=False,
55 install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
56 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
57 entry_points={
58 'console_scripts': [
59 'cfn-lint = cfnlint.__main__:main'
60 ]
61 },
62 license='MIT no attribution',
63 test_suite="unittest",
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Intended Audience :: Developers',
67 'License :: OSI Approved :: MIT License',
68 'Natural Language :: English',
69 'Operating System :: OS Independent',
70 'Programming Language :: Python :: 2',
71 'Programming Language :: Python :: 2.7',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.4',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 ],
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,15 @@
]},
packages=find_packages('src'),
zip_safe=False,
- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
+ install_requires=[
+ 'pyyaml',
+ 'six~=1.11',
+ 'requests>=2.15.0',
+ 'aws-sam-translator>=1.8.0',
+ 'jsonpatch',
+ 'jsonschema~=2.6',
+ 'pathlib2>=2.3.0;python_version<"3.4"'
+ ],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
entry_points={
'console_scripts': [
|
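The substantive change here is the PEP 508 environment marker on `pathlib2`, which keeps the backport off interpreters that already ship `pathlib`. A minimal sketch of how such markers evaluate at install time, assuming the third-party `packaging` library is available (the requirement strings mirror the diff; none of this code is from cfn-lint):

```python
# Sketch: evaluating PEP 508 environment markers with the `packaging`
# library. On Python >= 3.4 the pathlib2 line evaluates to "skip".
from packaging.requirements import Requirement

requirements = [
    'requests>=2.15.0',
    'pathlib2>=2.3.0; python_version < "3.4"',
]

for line in requirements:
    req = Requirement(line)
    # A requirement with no marker always applies; otherwise the marker
    # is checked against the current interpreter's environment.
    applies = req.marker.evaluate() if req.marker is not None else True
    print(f"{req.name}: {'install' if applies else 'skip'}")
```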
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,15 @@\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n+ install_requires=[\n+ 'pyyaml',\n+ 'six~=1.11',\n+ 'requests>=2.15.0',\n+ 'aws-sam-translator>=1.8.0',\n+ 'jsonpatch',\n+ 'jsonschema~=2.6',\n+ 'pathlib2>=2.3.0;python_version<\"3.4\"'\n+ ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n", "issue": "Can't run in Travis CI xenial\nI was testing the new Travis CI image:\r\n\r\nhttps://blog.travis-ci.com/2018-11-08-xenial-release\r\nhttps://docs.travis-ci.com/user/reference/xenial/\r\n\r\nMy install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:\r\n\r\n```Traceback (most recent call last):\r\n File \"/home/travis/.local/bin/cfn-lint\", line 7, in <module>\r\n from cfnlint.__main__ import main\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py\", line 19, in <module>\r\n import cfnlint.core\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py\", line 24, in <module>\r\n import cfnlint.maintenance\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py\", line 19, in <module>\r\n import requests\r\n File \"/usr/lib/python2.7/dist-packages/requests/__init__.py\", line 58, in <module>\r\n from . import utils\r\n File \"/usr/lib/python2.7/dist-packages/requests/utils.py\", line 25, in <module>\r\n from . import certs\r\nImportError: cannot import name certs\r\n```\r\n\r\nI \"fixed\" this problem by just changing to Python3 in my build script:\r\n\r\n```\r\npyenv global 3.7\r\npip3 install --user --upgrade cfn-lint\r\ncfn-lint stack.cfn.yaml\r\n```\r\n\r\nBut there might be some other lingering issue here\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=[\n 'pyyaml',\n 'six~=1.11',\n 'requests>=2.15.0',\n 'aws-sam-translator>=1.8.0',\n 'jsonpatch',\n 'jsonschema~=2.6',\n 'pathlib2>=2.3.0;python_version<\"3.4\"'\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
| 1,488 | 221 |
gh_patches_debug_62284
|
rasdani/github-patches
|
git_diff
|
SciTools__cartopy-228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
coastlines drawn under filled contours
When using matplotlib v1.3.x and cartopy v0.7.x (development master from github), coastlines and other features are drawn underneath filled contours unless:
```
ax.coastlines(zorder=1)
```
is used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/cartopy/mpl/feature_artist.py`
Content:
```
1 # (C) British Crown Copyright 2011 - 2012, Met Office
2 #
3 # This file is part of cartopy.
4 #
5 # cartopy is free software: you can redistribute it and/or modify it under
6 # the terms of the GNU Lesser General Public License as published by the
7 # Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # cartopy is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with cartopy. If not, see <http://www.gnu.org/licenses/>.
17 """
18 This module defines the :class:`FeatureArtist` class, for drawing
19 :class:`Feature` instances with matplotlib.
20
21 """
22 import warnings
23 import weakref
24
25 import matplotlib.artist
26 import matplotlib.cbook
27 import matplotlib.collections
28 import matplotlib.lines
29
30 import cartopy.mpl.patch
31
32
33 class FeatureArtist(matplotlib.artist.Artist):
34 """
35 A subclass of :class:`~matplotlib.artist.Artist` capable of
36 drawing a :class:`cartopy.feature.Feature`.
37
38 """
39 _geometry_to_path_cache = weakref.WeakKeyDictionary()
40 """
41 A nested mapping from geometry and target projection to the
42 resulting transformed matplotlib paths::
43
44 {geom: {target_projection: list_of_paths}}
45
46 This provides a significant boost when producing multiple maps of the
47 same projection.
48
49 """
50 def __init__(self, feature, **kwargs):
51 """
52 Args:
53
54 * feature:
55 an instance of :class:`cartopy.feature.Feature` to draw.
56 * kwargs:
57 keyword arguments to be used when drawing the feature. These
58 will override those shared with the feature.
59
60 """
61 super(FeatureArtist, self).__init__()
62
63 if kwargs is None:
64 kwargs = {}
65 self._kwargs = dict(kwargs)
66
67 # Set default zorder so that features are drawn before
68 # lines e.g. contours but after images.
69 # Note that the zorder of Patch, PatchCollection and PathCollection
70 # are all 1 by default. Assuming equal zorder drawing takes place in
71 # the following order: collections, patches, lines (default zorder=2),
72 # text (default zorder=3), then other artists e.g. FeatureArtist.
73 if self._kwargs.get('zorder') is not None:
74 self.set_zorder(self._kwargs['zorder'])
75 elif feature.kwargs.get('zorder') is not None:
76 self.set_zorder(feature.kwargs['zorder'])
77 else:
78 self.set_zorder(matplotlib.collections.PathCollection.zorder)
79
80 self._feature = feature
81
82 @matplotlib.artist.allow_rasterization
83 def draw(self, renderer, *args, **kwargs):
84 """
85 Draws the geometries of the feature that intersect with the extent of
86 the :class:`cartopy.mpl.GeoAxes` instance to which this
87 object has been added.
88
89 """
90 if not self.get_visible():
91 return
92
93 ax = self.get_axes()
94 feature_crs = self._feature.crs
95
96 # Get geometries that we need to draw.
97 extent = None
98 try:
99 extent = ax.get_extent(feature_crs)
100 except ValueError:
101 warnings.warn('Unable to determine extent. Defaulting to global.')
102 geoms = self._feature.intersecting_geometries(extent)
103
104 # Project (if necessary) and convert geometries to matplotlib paths.
105 paths = []
106 key = ax.projection
107 for geom in geoms:
108 mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,
109 {})
110 geom_paths = mapping.get(key)
111 if geom_paths is None:
112 if ax.projection != feature_crs:
113 projected_geom = ax.projection.project_geometry(
114 geom, feature_crs)
115 else:
116 projected_geom = geom
117 geom_paths = cartopy.mpl.patch.geos_to_path(
118 projected_geom)
119 mapping[key] = geom_paths
120 paths.extend(geom_paths)
121
122 # Build path collection and draw it.
123 transform = ax.projection._as_mpl_transform(ax)
124 # Combine all the keyword args in priority order
125 final_kwargs = dict(self._feature.kwargs)
126 final_kwargs.update(self._kwargs)
127 final_kwargs.update(kwargs)
128 c = matplotlib.collections.PathCollection(paths,
129 transform=transform,
130 **final_kwargs)
131 c.set_clip_path(ax.patch)
132 return c.draw(renderer)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py
--- a/lib/cartopy/mpl/feature_artist.py
+++ b/lib/cartopy/mpl/feature_artist.py
@@ -75,7 +75,10 @@
elif feature.kwargs.get('zorder') is not None:
self.set_zorder(feature.kwargs['zorder'])
else:
- self.set_zorder(matplotlib.collections.PathCollection.zorder)
+ # The class attribute matplotlib.collections.PathCollection.zorder
+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is
+ # used instead.
+ self.set_zorder(1)
self._feature = feature
|
{"golden_diff": "diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py\n--- a/lib/cartopy/mpl/feature_artist.py\n+++ b/lib/cartopy/mpl/feature_artist.py\n@@ -75,7 +75,10 @@\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n- self.set_zorder(matplotlib.collections.PathCollection.zorder)\n+ # The class attribute matplotlib.collections.PathCollection.zorder\n+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n+ # used instead.\n+ self.set_zorder(1)\n \n self._feature = feature\n", "issue": "coastlines drawn under filled contours\nWhen using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:\n\n```\nax.coastlines(zorder=1)\n```\n\nis used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. 
FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n self.set_zorder(matplotlib.collections.PathCollection.zorder)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}], "after_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n # The class attribute matplotlib.collections.PathCollection.zorder\n # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n # used instead.\n self.set_zorder(1)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. 
Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}]}
| 1,644 | 162 |
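The patch pins the default to a literal `1` because `matplotlib.collections.PathCollection.zorder` stopped existing as a class attribute after mpl v1.2.0. As a rough, self-contained illustration of the stacking rule involved (hypothetical demo code, not from cartopy; default zorders differ across matplotlib versions):

```python
# Hypothetical demo: higher zorder paints later and therefore on top;
# artists with equal zorder fall back to insertion order, which is why
# the issue's ax.coastlines(zorder=1) workaround also happened to work.
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import numpy as np

x = y = np.linspace(-2.0, 2.0, 50)
z = np.exp(-(x[None, :] ** 2 + y[:, None] ** 2))

fig, ax = plt.subplots()
ax.contourf(x, y, z)
# Stand-in for a coastline: zorder=2 makes the ordering unambiguous.
ax.plot([-2, 2], [-2, 2], color="black", linewidth=2, zorder=2)
fig.savefig("zorder_demo.png")
```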
gh_patches_debug_9163
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 3.8 support to setup.py
We've been running CI against 3.8 for a while now, so we should update the metadata in `setup.py` to explicitly state we support 3.8.
We should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = "opsdroid"
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, "README.md"), encoding="utf8").read()
12
13 PACKAGES = find_packages(
14 exclude=["tests", "tests.*", "modules", "modules.*", "docs", "docs.*"]
15 )
16
17
18 # For now we simply define the install_requires based on the contents
19 # of requirements.txt. In the future, install_requires may become much
20 # looser than the (automatically) resolved requirements.txt.
21 with open(os.path.join(HERE, "requirements.txt"), "r") as fh:
22 REQUIRES = [line.strip() for line in fh]
23
24
25 class Develop(develop):
26 """Custom `develop` command to always build mo files on install -e."""
27
28 def run(self):
29 self.run_command("compile_catalog")
30 develop.run(self) # old style class
31
32
33 class BuildPy(build_py):
34 """Custom `build_py` command to always build mo files for wheels."""
35
36 def run(self):
37 self.run_command("compile_catalog")
38 build_py.run(self) # old style class
39
40
41 class Sdist(sdist):
42 """Custom `sdist` command to ensure that mo files are always created."""
43
44 def run(self):
45 self.run_command("compile_catalog")
46 sdist.run(self) # old style class
47
48
49 setup(
50 name=PACKAGE_NAME,
51 version=versioneer.get_version(),
52 license="Apache License 2.0",
53 url="https://opsdroid.github.io/",
54 download_url="https://github.com/opsdroid/opsdroid/releases",
55 author="Jacob Tomlinson",
56 author_email="[email protected]",
57 description="An open source ChatOps bot framework.",
58 long_description=README,
59 long_description_content_type="text/markdown",
60 packages=PACKAGES,
61 include_package_data=True,
62 zip_safe=False,
63 platforms="any",
64 classifiers=[
65 "Development Status :: 4 - Beta",
66 "Environment :: Console",
67 "Framework :: AsyncIO",
68 "Intended Audience :: Developers",
69 "Intended Audience :: System Administrators",
70 "Intended Audience :: Information Technology",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Topic :: Communications :: Chat",
78 "Topic :: Scientific/Engineering :: Artificial Intelligence",
79 "Topic :: Software Development :: Libraries :: Python Modules",
80 ],
81 install_requires=REQUIRES,
82 test_suite="tests",
83 keywords=[
84 "bot",
85 "bot-framework",
86 "opsdroid",
87 "botkit",
88 "python3",
89 "asyncio",
90 "chatops",
91 "devops",
92 "nlu",
93 ],
94 setup_requires=["Babel"],
95 cmdclass=versioneer.get_cmdclass(
96 {"sdist": Sdist, "build_py": BuildPy, "develop": Develop}
97 ),
98 entry_points={"console_scripts": ["opsdroid = opsdroid.cli:cli"]},
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Topic :: Communications :: Chat",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,6 +74,7 @@\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n", "issue": "Add 3.8 support to setup.py\nWe've been running CI against 3.8 for a while now, we should update the metadata in `setup.py` to explicitly state we support 3.8.\r\n\r\nWe should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n 
keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": 
"setup.py"}]}
| 1,280 | 112 |
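Because the underlying problem was packaging metadata drifting behind the CI matrix, a small guard script can catch the drift automatically. This is a hypothetical helper for illustration only; the `CI_VERSIONS` list and the `setup.py` path are assumptions, not taken from the opsdroid repo:

```python
# Hypothetical CI check: fail if a Python version tested in CI lacks a
# matching trove classifier in setup.py.
import re

CI_VERSIONS = ["3.6", "3.7", "3.8"]  # assumed to mirror the CI matrix

with open("setup.py") as fh:
    source = fh.read()

classified = set(
    re.findall(r"Programming Language :: Python :: (\d\.\d+)", source)
)

missing = [v for v in CI_VERSIONS if v not in classified]
if missing:
    raise SystemExit(f"setup.py is missing classifiers for: {', '.join(missing)}")
print("classifiers cover every CI version")
```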
gh_patches_debug_14665
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-9517
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SVG static assets broken and not displayed
Every asset with an svg type is displayed as broken in the web interface.

We installed sentry 9 with python and nginx as proxy on CentOS 7.2
When I look at the content-type returned, svg files are not recognized.
```
$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg
(...)
< Content-Type: application/octet-stream
```
If I check the file on the server, the svg is not recognized.
```
$ file php.svg
php.svg: HTML document, ASCII text, with very long lines, with no line terminators
```
If we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/runner/settings.py`
Content:
```
1 """
2 sentry.runner.settings
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 import os
11 import click
12
13 DEFAULT_SETTINGS_MODULE = 'sentry.conf.server'
14 DEFAULT_SETTINGS_CONF = 'config.yml'
15 DEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'
16
17
18 def generate_secret_key():
19 from django.utils.crypto import get_random_string
20 chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'
21 return get_random_string(50, chars)
22
23
24 def load_config_template(path, version='default'):
25 from pkg_resources import resource_string
26 return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')
27
28
29 def generate_settings(dev=False):
30 """
31 This command is run when ``default_path`` doesn't exist, or ``init`` is
32 run and returns a string representing the default data to put into their
33 settings file.
34 """
35 context = {
36 'secret_key': generate_secret_key(),
37 'debug_flag': dev,
38 'mail.backend': 'console' if dev else 'smtp',
39 }
40
41 py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context
42 yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context
43 return py, yaml
44
45
46 def get_sentry_conf():
47 """
48 Fetch the SENTRY_CONF value, either from the click context
49 if available, or SENTRY_CONF environment variable.
50 """
51 try:
52 ctx = click.get_current_context()
53 return ctx.obj['config']
54 except (RuntimeError, KeyError, TypeError):
55 try:
56 return os.environ['SENTRY_CONF']
57 except KeyError:
58 return '~/.sentry'
59
60
61 def discover_configs():
62 """
63 Discover the locations of three configuration components:
64 * Config directory (~/.sentry)
65 * Optional python config file (~/.sentry/sentry.conf.py)
66 * Optional yaml config (~/.sentry/config.yml)
67 """
68 try:
69 config = os.environ['SENTRY_CONF']
70 except KeyError:
71 config = '~/.sentry'
72
73 config = os.path.expanduser(config)
74
75 # This is the old, now deprecated code path where SENTRY_CONF is pointed directly
76 # to a python file
77 if config.endswith(('.py', '.conf')) or os.path.isfile(config):
78 return (os.path.dirname(config), config, None, )
79
80 return (
81 config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),
82 os.path.join(config, DEFAULT_SETTINGS_CONF),
83 )
84
85
86 def configure(ctx, py, yaml, skip_service_validation=False):
87 """
88 Given the two different config files, set up the environment.
89
90 NOTE: Will only execute once, so it's safe to call multiple times.
91 """
92 global __installed
93 if __installed:
94 return
95
96 # Make sure that our warnings are always displayed
97 import warnings
98 warnings.filterwarnings('default', '', Warning, r'^sentry')
99
100 # Add in additional mimetypes that are useful for our static files
101 # which aren't common in default system registries
102 import mimetypes
103 for type, ext in (
104 ('application/json', 'map'), ('application/font-woff', 'woff'),
105 ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
106 ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
107 'ttc'), ('font/opentype', 'otf'),
108 ):
109 mimetypes.add_type(type, '.' + ext)
110
111 from .importer import install
112
113 if yaml is None:
114 # `yaml` will be None when SENTRY_CONF is pointed
115 # directly to a file, in which case, this file must exist
116 if not os.path.exists(py):
117 if ctx:
118 raise click.ClickException(
119 "Configuration file does not exist. Use 'sentry init' to initialize the file."
120 )
121 raise ValueError(
122 "Configuration file does not exist at '%s'" % click.format_filename(py)
123 )
124 elif not os.path.exists(yaml) and not os.path.exists(py):
125 if ctx:
126 raise click.ClickException(
127 "Configuration file does not exist. Use 'sentry init' to initialize the file."
128 )
129 raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))
130
131 # Add autoreload for config.yml file if needed
132 if yaml is not None and os.path.exists(yaml):
133 from sentry.utils.uwsgi import reload_on_change
134 reload_on_change(yaml)
135
136 os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'
137
138 install('sentry_config', py, DEFAULT_SETTINGS_MODULE)
139
140 # HACK: we need to force access of django.conf.settings to
141 # ensure we don't hit any import-driven recursive behavior
142 from django.conf import settings
143 hasattr(settings, 'INSTALLED_APPS')
144
145 from .initializer import initialize_app, on_configure
146 initialize_app(
147 {
148 'config_path': py,
149 'settings': settings,
150 'options': yaml,
151 },
152 skip_service_validation=skip_service_validation
153 )
154 on_configure({'settings': settings})
155
156 __installed = True
157
158
159 __installed = False
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py
--- a/src/sentry/runner/settings.py
+++ b/src/sentry/runner/settings.py
@@ -101,10 +101,14 @@
# which aren't common in default system registries
import mimetypes
for type, ext in (
- ('application/json', 'map'), ('application/font-woff', 'woff'),
- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
- 'ttc'), ('font/opentype', 'otf'),
+ ('application/json', 'map'),
+ ('application/font-woff', 'woff'),
+ ('application/font-woff2', 'woff2'),
+ ('application/vnd.ms-fontobject', 'eot'),
+ ('application/x-font-ttf', 'ttf'),
+ ('application/x-font-ttf', 'ttc'),
+ ('font/opentype', 'otf'),
+ ('image/svg+xml', 'svg'),
):
mimetypes.add_type(type, '.' + ext)
|
{"golden_diff": "diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py\n--- a/src/sentry/runner/settings.py\n+++ b/src/sentry/runner/settings.py\n@@ -101,10 +101,14 @@\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n- ('application/json', 'map'), ('application/font-woff', 'woff'),\n- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n- 'ttc'), ('font/opentype', 'otf'),\n+ ('application/json', 'map'),\n+ ('application/font-woff', 'woff'),\n+ ('application/font-woff2', 'woff2'),\n+ ('application/vnd.ms-fontobject', 'eot'),\n+ ('application/x-font-ttf', 'ttf'),\n+ ('application/x-font-ttf', 'ttc'),\n+ ('font/opentype', 'otf'),\n+ ('image/svg+xml', 'svg'),\n ):\n mimetypes.add_type(type, '.' + ext)\n", "issue": "SVG static assets broken and not displayed\nEvery asset with svg type are displayed as broken in web interface.\r\n\r\n\r\n\r\nWe installed sentry 9 with python and nginx as proxy on CentOS 7.2\r\n\r\nWhen I look the content-type returned, svg files are not recognized.\r\n\r\n```\r\n$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg\r\n(...)\r\n< Content-Type: application/octet-stream\r\n```\r\n\r\nIf I check the file on server, the svg is not recognized.\r\n\r\n```\r\n$ file php.svg \r\nphp.svg: HTML document, ASCII text, with very long lines, with no line terminators\r\n```\r\n\r\nIf we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_\r\n\n", "before_files": [{"content": "\"\"\"\nsentry.runner.settings\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport click\n\nDEFAULT_SETTINGS_MODULE = 'sentry.conf.server'\nDEFAULT_SETTINGS_CONF = 'config.yml'\nDEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'\n\n\ndef generate_secret_key():\n from django.utils.crypto import get_random_string\n chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'\n return get_random_string(50, chars)\n\n\ndef load_config_template(path, version='default'):\n from pkg_resources import resource_string\n return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')\n\n\ndef generate_settings(dev=False):\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n context = {\n 'secret_key': generate_secret_key(),\n 'debug_flag': dev,\n 'mail.backend': 'console' if dev else 'smtp',\n }\n\n py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context\n yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context\n return py, yaml\n\n\ndef get_sentry_conf():\n \"\"\"\n Fetch the SENTRY_CONF value, either from the click context\n if available, or SENTRY_CONF environment variable.\n \"\"\"\n try:\n ctx = click.get_current_context()\n return ctx.obj['config']\n except (RuntimeError, KeyError, TypeError):\n try:\n return os.environ['SENTRY_CONF']\n except KeyError:\n return '~/.sentry'\n\n\ndef discover_configs():\n \"\"\"\n Discover the locations of three configuration components:\n * Config directory (~/.sentry)\n * Optional python config file (~/.sentry/sentry.conf.py)\n * Optional yaml config (~/.sentry/config.yml)\n \"\"\"\n try:\n config = os.environ['SENTRY_CONF']\n 
except KeyError:\n config = '~/.sentry'\n\n config = os.path.expanduser(config)\n\n # This is the old, now deprecated code path where SENTRY_CONF is pointed directly\n # to a python file\n if config.endswith(('.py', '.conf')) or os.path.isfile(config):\n return (os.path.dirname(config), config, None, )\n\n return (\n config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),\n os.path.join(config, DEFAULT_SETTINGS_CONF),\n )\n\n\ndef configure(ctx, py, yaml, skip_service_validation=False):\n \"\"\"\n Given the two different config files, set up the environment.\n\n NOTE: Will only execute once, so it's safe to call multiple times.\n \"\"\"\n global __installed\n if __installed:\n return\n\n # Make sure that our warnings are always displayed\n import warnings\n warnings.filterwarnings('default', '', Warning, r'^sentry')\n\n # Add in additional mimetypes that are useful for our static files\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n ('application/json', 'map'), ('application/font-woff', 'woff'),\n ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n 'ttc'), ('font/opentype', 'otf'),\n ):\n mimetypes.add_type(type, '.' + ext)\n\n from .importer import install\n\n if yaml is None:\n # `yaml` will be None when SENTRY_CONF is pointed\n # directly to a file, in which case, this file must exist\n if not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\n \"Configuration file does not exist at '%s'\" % click.format_filename(py)\n )\n elif not os.path.exists(yaml) and not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. 
Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\"Configuration file does not exist at '%s'\" % click.format_filename(yaml))\n\n # Add autoreload for config.yml file if needed\n if yaml is not None and os.path.exists(yaml):\n from sentry.utils.uwsgi import reload_on_change\n reload_on_change(yaml)\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'\n\n install('sentry_config', py, DEFAULT_SETTINGS_MODULE)\n\n # HACK: we need to force access of django.conf.settings to\n # ensure we don't hit any import-driven recursive behavior\n from django.conf import settings\n hasattr(settings, 'INSTALLED_APPS')\n\n from .initializer import initialize_app, on_configure\n initialize_app(\n {\n 'config_path': py,\n 'settings': settings,\n 'options': yaml,\n },\n skip_service_validation=skip_service_validation\n )\n on_configure({'settings': settings})\n\n __installed = True\n\n\n__installed = False\n", "path": "src/sentry/runner/settings.py"}], "after_files": [{"content": "\"\"\"\nsentry.runner.settings\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport click\n\nDEFAULT_SETTINGS_MODULE = 'sentry.conf.server'\nDEFAULT_SETTINGS_CONF = 'config.yml'\nDEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'\n\n\ndef generate_secret_key():\n from django.utils.crypto import get_random_string\n chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'\n return get_random_string(50, chars)\n\n\ndef load_config_template(path, version='default'):\n from pkg_resources import resource_string\n return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')\n\n\ndef generate_settings(dev=False):\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n context = {\n 'secret_key': generate_secret_key(),\n 'debug_flag': dev,\n 'mail.backend': 'console' if dev else 'smtp',\n }\n\n py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context\n yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context\n return py, yaml\n\n\ndef get_sentry_conf():\n \"\"\"\n Fetch the SENTRY_CONF value, either from the click context\n if available, or SENTRY_CONF environment variable.\n \"\"\"\n try:\n ctx = click.get_current_context()\n return ctx.obj['config']\n except (RuntimeError, KeyError, TypeError):\n try:\n return os.environ['SENTRY_CONF']\n except KeyError:\n return '~/.sentry'\n\n\ndef discover_configs():\n \"\"\"\n Discover the locations of three configuration components:\n * Config directory (~/.sentry)\n * Optional python config file (~/.sentry/sentry.conf.py)\n * Optional yaml config (~/.sentry/config.yml)\n \"\"\"\n try:\n config = os.environ['SENTRY_CONF']\n except KeyError:\n config = '~/.sentry'\n\n config = os.path.expanduser(config)\n\n # This is the old, now deprecated code path where SENTRY_CONF is pointed directly\n # to a python file\n if config.endswith(('.py', '.conf')) or os.path.isfile(config):\n return (os.path.dirname(config), config, None, )\n\n return (\n config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),\n os.path.join(config, DEFAULT_SETTINGS_CONF),\n )\n\n\ndef configure(ctx, py, yaml, skip_service_validation=False):\n \"\"\"\n Given the two different config files, set up the environment.\n\n NOTE: Will only execute once, so it's safe 
to call multiple times.\n \"\"\"\n global __installed\n if __installed:\n return\n\n # Make sure that our warnings are always displayed\n import warnings\n warnings.filterwarnings('default', '', Warning, r'^sentry')\n\n # Add in additional mimetypes that are useful for our static files\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n ('application/json', 'map'),\n ('application/font-woff', 'woff'),\n ('application/font-woff2', 'woff2'),\n ('application/vnd.ms-fontobject', 'eot'),\n ('application/x-font-ttf', 'ttf'),\n ('application/x-font-ttf', 'ttc'),\n ('font/opentype', 'otf'),\n ('image/svg+xml', 'svg'),\n ):\n mimetypes.add_type(type, '.' + ext)\n\n from .importer import install\n\n if yaml is None:\n # `yaml` will be None when SENTRY_CONF is pointed\n # directly to a file, in which case, this file must exist\n if not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\n \"Configuration file does not exist at '%s'\" % click.format_filename(py)\n )\n elif not os.path.exists(yaml) and not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\"Configuration file does not exist at '%s'\" % click.format_filename(yaml))\n\n # Add autoreload for config.yml file if needed\n if yaml is not None and os.path.exists(yaml):\n from sentry.utils.uwsgi import reload_on_change\n reload_on_change(yaml)\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'\n\n install('sentry_config', py, DEFAULT_SETTINGS_MODULE)\n\n # HACK: we need to force access of django.conf.settings to\n # ensure we don't hit any import-driven recursive behavior\n from django.conf import settings\n hasattr(settings, 'INSTALLED_APPS')\n\n from .initializer import initialize_app, on_configure\n initialize_app(\n {\n 'config_path': py,\n 'settings': settings,\n 'options': yaml,\n },\n skip_service_validation=skip_service_validation\n )\n on_configure({'settings': settings})\n\n __installed = True\n\n\n__installed = False\n", "path": "src/sentry/runner/settings.py"}]}
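For reference, a minimal sketch of what the patched `configure()` achieves for SVG assets, with the Sentry-specific wiring omitted: registering the mapping once makes `mimetypes.guess_type` return the right content type even on hosts whose system registry lacks an `.svg` entry.

```python
import mimetypes

# Same registration the patched loop performs for the new ('image/svg+xml', 'svg') pair.
mimetypes.add_type('image/svg+xml', '.svg')

# Without the registration this can be (None, None) on minimal CentOS images,
# which is why the server fell back to application/octet-stream.
print(mimetypes.guess_type('php.svg'))  # ('image/svg+xml', None)
```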
| 2,029 | 265 |
gh_patches_debug_63641
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-566
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request - block quote formatting in reviews
I published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:
```
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
```
But it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!
I would expect it to render more like it does here on Github:
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
--- END ISSUE ---
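Assuming the markdown renderer has already turned the `>` line into `<blockquote>…</blockquote>` before sanitizing, the symptom is reproducible with the parser shipped in this repository; the allow-list simply swallows the tag and only the bare text survives. A minimal sketch:

```python
from bookwyrm.sanitize_html import InputHtmlParser

parser = InputHtmlParser()
parser.feed('<blockquote>I was expecting to go to prison...</blockquote>')
# Before the fix, the blockquote tags are dropped by the allow-list:
print(parser.get_output())  # 'I was expecting to go to prison...'
```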
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/sanitize_html.py`
Content:
```
1 ''' html parser to clean up incoming text from unknown sources '''
2 from html.parser import HTMLParser
3
4 class InputHtmlParser(HTMLParser):#pylint: disable=abstract-method
5 ''' Removes any html that isn't allowed_tagsed from a block '''
6
7 def __init__(self):
8 HTMLParser.__init__(self)
9 self.allowed_tags = [
10 'p', 'br',
11 'b', 'i', 'strong', 'em', 'pre',
12 'a', 'span', 'ul', 'ol', 'li'
13 ]
14 self.tag_stack = []
15 self.output = []
16 # if the html appears invalid, we just won't allow any at all
17 self.allow_html = True
18
19
20 def handle_starttag(self, tag, attrs):
21 ''' check if the tag is valid '''
22 if self.allow_html and tag in self.allowed_tags:
23 self.output.append(('tag', self.get_starttag_text()))
24 self.tag_stack.append(tag)
25 else:
26 self.output.append(('data', ''))
27
28
29 def handle_endtag(self, tag):
30 ''' keep the close tag '''
31 if not self.allow_html or tag not in self.allowed_tags:
32 self.output.append(('data', ''))
33 return
34
35 if not self.tag_stack or self.tag_stack[-1] != tag:
36 # the end tag doesn't match the most recent start tag
37 self.allow_html = False
38 self.output.append(('data', ''))
39 return
40
41 self.tag_stack = self.tag_stack[:-1]
42 self.output.append(('tag', '</%s>' % tag))
43
44
45 def handle_data(self, data):
46 ''' extract the answer, if we're in an answer tag '''
47 self.output.append(('data', data))
48
49
50 def get_output(self):
51 ''' convert the output from a list of tuples to a string '''
52 if self.tag_stack:
53 self.allow_html = False
54 if not self.allow_html:
55 return ''.join(v for (k, v) in self.output if k == 'data')
56 return ''.join(v for (k, v) in self.output)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py
--- a/bookwyrm/sanitize_html.py
+++ b/bookwyrm/sanitize_html.py
@@ -7,7 +7,7 @@
def __init__(self):
HTMLParser.__init__(self)
self.allowed_tags = [
- 'p', 'br',
+ 'p', 'blockquote', 'br',
'b', 'i', 'strong', 'em', 'pre',
'a', 'span', 'ul', 'ol', 'li'
]
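A quick check against the patched allow-list (a sketch, not part of the project's test suite): with `'blockquote'` accepted, the tag pair now round-trips intact.

```python
from bookwyrm.sanitize_html import InputHtmlParser

parser = InputHtmlParser()
parser.feed('<blockquote>quoted passage</blockquote>')
assert parser.get_output() == '<blockquote>quoted passage</blockquote>'
```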
|
{"golden_diff": "diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py\n--- a/bookwyrm/sanitize_html.py\n+++ b/bookwyrm/sanitize_html.py\n@@ -7,7 +7,7 @@\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n- 'p', 'br',\n+ 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n", "issue": "Feature request - block quote formatting in reviews\nI published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:\r\n\r\n```\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\r\n```\r\n\r\nBut it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!\r\n\r\nI would expect it to render more like it does here on Github:\r\n\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\n", "before_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}], "after_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n 
self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}]}
| 1,025 | 124 |
gh_patches_debug_21318
|
rasdani/github-patches
|
git_diff
|
openai__gym-1400
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
why MultiDiscrete dtype=int8
MultiDiscrete dtype is hardcoded as int8.
Is this a bug ? What happens when we need more than 256 category
```
space = MultiDiscrete([7, 1024])
space.sample()
```
https://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10
--- END ISSUE ---
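The sampling path can be mirrored outside the class to see why a narrow dtype is a problem: values drawn for the 1024-way variable routinely exceed 255 and would wrap under an 8-bit cast. A sketch of the same arithmetic `sample()` uses:

```python
import numpy as np

rng = np.random.RandomState(0)
nvec = np.asarray([7, 1024], dtype=np.int64)

# Same arithmetic as MultiDiscrete.sample(): uniform floats scaled by nvec.
sample = (rng.random_sample(nvec.shape) * nvec).astype(np.int64)
print(sample)                                          # second entry can be >> 255
print((0 <= sample).all() and (sample < nvec).all())   # True with a wide dtype
```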
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/multi_discrete.py`
Content:
```
1 import gym
2 import numpy as np
3 from .space import Space
4
5
6 class MultiDiscrete(Space):
7 """
8 - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs
9 - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
10 - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space
11
12 Note: A value of 0 always need to represent the NOOP action.
13
14 e.g. Nintendo Game Controller
15 - Can be conceptualized as 3 discrete action spaces:
16
17 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
18 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
19 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
20
21 - Can be initialized as
22
23 MultiDiscrete([ 5, 2, 2 ])
24
25 """
26 def __init__(self, nvec):
27
28 """
29 nvec: vector of counts of each categorical variable
30 """
31 assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
32 self.nvec = np.asarray(nvec, dtype=np.uint32)
33
34 super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
35 self.np_random = np.random.RandomState()
36
37 def seed(self, seed):
38 self.np_random.seed(seed)
39
40 def sample(self):
41 return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
42
43 def contains(self, x):
44 # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
45 # is within correct bounds for space dtype (even though x does not have to be unsigned)
46 return (0 <= x).all() and (x < self.nvec).all()
47
48 def to_jsonable(self, sample_n):
49 return [sample.tolist() for sample in sample_n]
50
51 def from_jsonable(self, sample_n):
52 return np.array(sample_n)
53
54 def __repr__(self):
55 return "MultiDiscrete({})".format(self.nvec)
56
57 def __eq__(self, other):
58 return np.all(self.nvec == other.nvec)
59
```
Path: `gym/spaces/space.py`
Content:
```
1 import numpy as np
2
3
4 class Space(object):
5 """Defines the observation and action spaces, so you can write generic
6 code that applies to any Env. For example, you can choose a random
7 action.
8 """
9 def __init__(self, shape=None, dtype=None):
10 import numpy as np # takes about 300-400ms to import, so we load lazily
11 self.shape = None if shape is None else tuple(shape)
12 self.dtype = None if dtype is None else np.dtype(dtype)
13
14 def sample(self):
15 """
16 Uniformly randomly sample a random element of this space
17 """
18 raise NotImplementedError
19
20 def seed(self, seed):
21 """Set the seed for this space's pseudo-random number generator. """
22 raise NotImplementedError
23
24 def contains(self, x):
25 """
26 Return boolean specifying if x is a valid
27 member of this space
28 """
29 raise NotImplementedError
30
31 def __contains__(self, x):
32 return self.contains(x)
33
34 def to_jsonable(self, sample_n):
35 """Convert a batch of samples from this space to a JSONable data type."""
36 # By default, assume identity is JSONable
37 return sample_n
38
39 def from_jsonable(self, sample_n):
40 """Convert a JSONable data type to a batch of samples from this space."""
41 # By default, assume identity is JSONable
42 return sample_n
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -1,4 +1,3 @@
-import gym
import numpy as np
from .space import Space
@@ -29,9 +28,9 @@
nvec: vector of counts of each categorical variable
"""
assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
- self.nvec = np.asarray(nvec, dtype=np.uint32)
+ self.nvec = np.asarray(nvec, dtype=np.int64)
- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)
self.np_random = np.random.RandomState()
def seed(self, seed):
diff --git a/gym/spaces/space.py b/gym/spaces/space.py
--- a/gym/spaces/space.py
+++ b/gym/spaces/space.py
@@ -1,6 +1,3 @@
-import numpy as np
-
-
class Space(object):
"""Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
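The wrap-around that the wider dtype avoids is easy to show in isolation (the shipped code actually used `uint32`; the `int8` in the issue title makes the failure mode vivid):

```python
import numpy as np

print(np.array([734]).astype(np.int8))   # [-34] -- 734 wraps past the int8 range
print(np.array([734]).astype(np.int64))  # [734]
```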
|
{"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -1,4 +1,3 @@\n-import gym\n import numpy as np\n from .space import Space\n \n@@ -29,9 +28,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n- self.nvec = np.asarray(nvec, dtype=np.uint32)\n+ self.nvec = np.asarray(nvec, dtype=np.int64)\n \n- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n \n def seed(self, seed):\ndiff --git a/gym/spaces/space.py b/gym/spaces/space.py\n--- a/gym/spaces/space.py\n+++ b/gym/spaces/space.py\n@@ -1,6 +1,3 @@\n-import numpy as np\n-\n-\n class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n", "issue": "why MultiDiscrete dtype=int8 \nMultiDiscrete dtype is hardcoded as int8. \r\nIs this a bug ? What happens when we need more than 256 category \r\n```\r\nspace = MultiDiscrete([7, 1024])\r\nspace.sample()\r\n```\r\n\r\nhttps://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10\n", "before_files": [{"content": "import gym\nimport numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.uint32)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "import numpy as np\n\n\nclass Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}], "after_files": [{"content": "import numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.int64)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}]}
| 1,484 | 314 |
gh_patches_debug_56609
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-678
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade Django to 3.0
Django 3.0 is out, and since it is a major release, we should consider upgrading to this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.24.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']
13
14 REQUIRES = [
15 'asdf>=2.3.3',
16 'astropy>=3.2.1',
17 'astroquery>=0.3.9',
18 'authlib',
19 'bokeh>=1.0,<1.4',
20 'codecov',
21 'crds',
22 'cryptography',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'jwst',
31 'matplotlib',
32 'nodejs',
33 'numpy',
34 'numpydoc',
35 'pandas',
36 'psycopg2',
37 'pysiaf',
38 'pytest',
39 'pytest-cov',
40 'scipy',
41 'sphinx',
42 'sqlalchemy',
43 'stsci_rtd_theme',
44 'twine',
45 'wtforms'
46 ]
47
48 setup(
49 name='jwql',
50 version=VERSION,
51 description=DESCRIPTION,
52 url='https://github.com/spacetelescope/jwql.git',
53 author=AUTHORS,
54 author_email='[email protected]',
55 license='BSD',
56 keywords=['astronomy', 'python'],
57 classifiers=['Programming Language :: Python'],
58 packages=find_packages(),
59 install_requires=REQUIRES,
60 dependency_links=DEPENDENCY_LINKS,
61 include_package_data=True,
62 include_dirs=[np.get_include()],
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
'codecov',
'crds',
'cryptography',
- 'django>=2.0,<3.0',
+ 'django',
'flake8',
'inflection',
'ipython',
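Dropping the pin entirely, as the patch does, lets the resolver pick the newest Django. A more conservative variant (a hypothetical alternative, not what was merged) would move the version window up instead of removing it:

```python
REQUIRES = [
    'django',             # as merged: whatever version the resolver selects
    # 'django>=3.0,<4.0'  # hypothetical alternative keeping an upper bound
]
```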
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n 'codecov',\n 'crds',\n 'cryptography',\n- 'django>=2.0,<3.0',\n+ 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n", "issue": "Upgrade Django to 3.0\nDjango 3.0 is out, and since it is a major release, we should consider upgrading to this.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]}
| 868 | 80 |
gh_patches_debug_47926
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-717
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Workaround Crowdin bug where integer yaml keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a work around, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`
--- END ISSUE ---
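The difference is visible with plain PyYAML (assumed available here, since the loaders parse YAML): integer keys load as `int`, quoted keys as `str`, so only the quoted form survives a tool that rewrites keys as strings. A minimal sketch:

```python
import yaml

with_int_keys = yaml.safe_load('0: beginner\n1: growing')
with_str_keys = yaml.safe_load('"0": beginner\n"1": growing')

print(list(with_int_keys))  # [0, 1]      -- integer keys, mangled by the Crowdin round-trip
print(list(with_str_keys))  # ['0', '1']  -- string keys, stable across the round-trip
```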
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py`
Content:
```
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=difficulty,
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -89,7 +89,7 @@
for difficulty in difficulty_levels:
new_difficulty = ProgrammingChallengeDifficulty(
- level=difficulty,
+ level=int(difficulty),
)
translations = difficulties_translations.get(difficulty, dict())
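With string keys in the YAML, the loader coerces each one back before handing it to the model. A sketch of the resulting flow, assuming `level` is an integer column as the `int()` call implies:

```python
for difficulty in ["0", "1", "2"]:  # keys now arrive as strings from Crowdin
    level = int(difficulty)         # restore the integer the model expects
```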
|
{"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -89,7 +89,7 @@\n for difficulty in difficulty_levels:\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=difficulty,\n+ level=int(difficulty),\n )\n \n translations = difficulties_translations.get(difficulty, dict())\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming 
language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=difficulty,\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}], "after_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the 
structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=int(difficulty),\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]}
| 1,250 | 133 |
gh_patches_debug_13056
|
rasdani/github-patches
|
git_diff
|
pypa__pip-392
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"pip freeze -r" returns ValueError() if requirements file contains --find-links
It seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:
1) echo "--find-links http://foo.bar/" > /tmp/req.txt
2) pip freeze -r /tmp/req.txt
"""
Exception:
Traceback (most recent call last):
File "/Users/hpfennig/work/pip/pip/basecommand.py", line 95, in main
self.run(options, args)
File "/Users/hpfennig/work/pip/pip/commands/freeze.py", line 92, in run
line_req = InstallRequirement.from_line(line)
File "/Users/hpfennig/work/pip/pip/req.py", line 105, in from_line
return cls(req, comes_from, url=url)
File "/Users/hpfennig/work/pip/pip/req.py", line 39, in **init**
req = pkg_resources.Requirement.parse(req)
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2510, in parse
reqs = list(parse_requirements(s))
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2436, in parse_requirements
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2404, in scan_list
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
ValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')
Storing complete log in /Users/hpfennig/.pip/pip.log
"""
I have a fix, but I am not sure this is the correct way to go about it. Would be happy to do an official pull request if needed.
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
index 01b5df9..03ac80f 100644
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@ class FreezeCommand(Command):
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
+   or line.startswith('--extra-index-url')
+   or line.startswith('--find-links')
+   or line.startswith('--index-url')):
f.write(line)
continue
else:
--- END ISSUE ---
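The traceback reduces to a one-liner: option lines are not requirement specifiers, so handing them to `pkg_resources` fails during parsing (on the setuptools 0.6 line shown above it surfaces as `ValueError`). A minimal repro sketch:

```python
import pkg_resources

try:
    pkg_resources.Requirement.parse('--find-links http://foo.bar/')
except Exception as exc:  # ValueError on setuptools 0.6c11; newer versions raise InvalidRequirement
    print(type(exc).__name__, exc)
```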
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/commands/freeze.py`
Content:
```
1 import re
2 import sys
3 import pkg_resources
4 import pip
5 from pip.req import InstallRequirement
6 from pip.log import logger
7 from pip.basecommand import Command
8 from pip.util import get_installed_distributions
9
10
11 class FreezeCommand(Command):
12 name = 'freeze'
13 usage = '%prog [OPTIONS]'
14 summary = 'Output all currently installed packages (exact versions) to stdout'
15
16 def __init__(self):
17 super(FreezeCommand, self).__init__()
18 self.parser.add_option(
19 '-r', '--requirement',
20 dest='requirement',
21 action='store',
22 default=None,
23 metavar='FILENAME',
24 help='Use the given requirements file as a hint about how to generate the new frozen requirements')
25 self.parser.add_option(
26 '-f', '--find-links',
27 dest='find_links',
28 action='append',
29 default=[],
30 metavar='URL',
31 help='URL for finding packages, which will be added to the frozen requirements file')
32 self.parser.add_option(
33 '-l', '--local',
34 dest='local',
35 action='store_true',
36 default=False,
37 help='If in a virtualenv, do not report globally-installed packages')
38
39 def setup_logging(self):
40 logger.move_stdout_to_stderr()
41
42 def run(self, options, args):
43 requirement = options.requirement
44 find_links = options.find_links or []
45 local_only = options.local
46 ## FIXME: Obviously this should be settable:
47 find_tags = False
48 skip_match = None
49
50 skip_regex = options.skip_requirements_regex
51 if skip_regex:
52 skip_match = re.compile(skip_regex)
53
54 dependency_links = []
55
56 f = sys.stdout
57
58 for dist in pkg_resources.working_set:
59 if dist.has_metadata('dependency_links.txt'):
60 dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
61 for link in find_links:
62 if '#egg=' in link:
63 dependency_links.append(link)
64 for link in find_links:
65 f.write('-f %s\n' % link)
66 installations = {}
67 for dist in get_installed_distributions(local_only=local_only):
68 req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
69 installations[req.name] = req
70 if requirement:
71 req_f = open(requirement)
72 for line in req_f:
73 if not line.strip() or line.strip().startswith('#'):
74 f.write(line)
75 continue
76 if skip_match and skip_match.search(line):
77 f.write(line)
78 continue
79 elif line.startswith('-e') or line.startswith('--editable'):
80 if line.startswith('-e'):
81 line = line[2:].strip()
82 else:
83 line = line[len('--editable'):].strip().lstrip('=')
84 line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)
85 elif (line.startswith('-r') or line.startswith('--requirement')
86 or line.startswith('-Z') or line.startswith('--always-unzip')
87 or line.startswith('-f') or line.startswith('-i')
88 or line.startswith('--extra-index-url')):
89 f.write(line)
90 continue
91 else:
92 line_req = InstallRequirement.from_line(line)
93 if not line_req.name:
94 logger.notify("Skipping line because it's not clear what it would install: %s"
95 % line.strip())
96 logger.notify(" (add #egg=PackageName to the URL to avoid this warning)")
97 continue
98 if line_req.name not in installations:
99 logger.warn("Requirement file contains %s, but that package is not installed"
100 % line.strip())
101 continue
102 f.write(str(installations[line_req.name]))
103 del installations[line_req.name]
104 f.write('## The following requirements were added by pip --freeze:\n')
105 for installation in sorted(installations.values(), key=lambda x: x.name):
106 f.write(str(installation))
107
108
109 FreezeCommand()
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
+ or line.startswith('--extra-index-url')
+ or line.startswith('--find-links')
+ or line.startswith('--index-url')):
f.write(line)
continue
else:
|
{"golden_diff": "diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n+ or line.startswith('--extra-index-url')\n+ or line.startswith('--find-links')\n+ or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n", "issue": "\"pip freeze -r\" returns ValueError() if requirements file contains --find-links\nIt seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:\n\n1) echo \"--find-links http://foo.bar/\" > /tmp/req.txt\n2) pip freeze -r /tmp/req.txt\n\n\"\"\"\nException:\nTraceback (most recent call last):\n File \"/Users/hpfennig/work/pip/pip/basecommand.py\", line 95, in main\n self.run(options, args)\n File \"/Users/hpfennig/work/pip/pip/commands/freeze.py\", line 92, in run\n line_req = InstallRequirement.from_line(line)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 105, in from_line\n return cls(req, comes_from, url=url)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 39, in **init**\n req = pkg_resources.Requirement.parse(req)\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2510, in parse\n reqs = list(parse_requirements(s))\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2436, in parse_requirements\n line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),\"version spec\")\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2404, in scan_list\n raise ValueError(\"Expected \"+item_name+\" in\",line,\"at\",line[p:])\nValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')\n\nStoring complete log in /Users/hpfennig/.pip/pip.log\n\"\"\"\n\nI have a fix, but I am not sure this is the correct way to go about it. 
Would be happy to do an official pull request if needed.\n\ndiff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\nindex 01b5df9..03ac80f 100644\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@ class FreezeCommand(Command):\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n- or line.startswith('--extra-index-url')\n- or line.startswith('--find-links')\n- or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n\n", "before_files": [{"content": "import re\nimport sys\nimport pkg_resources\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.log import logger\nfrom pip.basecommand import Command\nfrom pip.util import get_installed_distributions\n\n\nclass FreezeCommand(Command):\n name = 'freeze'\n usage = '%prog [OPTIONS]'\n summary = 'Output all currently installed packages (exact versions) to stdout'\n\n def __init__(self):\n super(FreezeCommand, self).__init__()\n self.parser.add_option(\n '-r', '--requirement',\n dest='requirement',\n action='store',\n default=None,\n metavar='FILENAME',\n help='Use the given requirements file as a hint about how to generate the new frozen requirements')\n self.parser.add_option(\n '-f', '--find-links',\n dest='find_links',\n action='append',\n default=[],\n metavar='URL',\n help='URL for finding packages, which will be added to the frozen requirements file')\n self.parser.add_option(\n '-l', '--local',\n dest='local',\n action='store_true',\n default=False,\n help='If in a virtualenv, do not report globally-installed packages')\n\n def setup_logging(self):\n logger.move_stdout_to_stderr()\n\n def run(self, options, args):\n requirement = options.requirement\n find_links = options.find_links or []\n local_only = options.local\n ## FIXME: Obviously this should be settable:\n find_tags = False\n skip_match = None\n\n skip_regex = options.skip_requirements_regex\n if skip_regex:\n skip_match = re.compile(skip_regex)\n\n dependency_links = []\n\n f = sys.stdout\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n f.write('-f %s\\n' % link)\n installations = {}\n for dist in get_installed_distributions(local_only=local_only):\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n installations[req.name] = req\n if requirement:\n req_f = open(requirement)\n for line in req_f:\n if not line.strip() or line.strip().startswith('#'):\n f.write(line)\n continue\n if skip_match and skip_match.search(line):\n f.write(line)\n continue\n elif line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n or line.startswith('--extra-index-url')):\n f.write(line)\n continue\n else:\n line_req = InstallRequirement.from_line(line)\n if not line_req.name:\n logger.notify(\"Skipping line because it's not clear what it 
would install: %s\"\n % line.strip())\n logger.notify(\" (add #egg=PackageName to the URL to avoid this warning)\")\n continue\n if line_req.name not in installations:\n logger.warn(\"Requirement file contains %s, but that package is not installed\"\n % line.strip())\n continue\n f.write(str(installations[line_req.name]))\n del installations[line_req.name]\n f.write('## The following requirements were added by pip --freeze:\\n')\n for installation in sorted(installations.values(), key=lambda x: x.name):\n f.write(str(installation))\n\n\nFreezeCommand()\n", "path": "pip/commands/freeze.py"}], "after_files": [{"content": "import re\nimport sys\nimport pkg_resources\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.log import logger\nfrom pip.basecommand import Command\nfrom pip.util import get_installed_distributions\n\n\nclass FreezeCommand(Command):\n name = 'freeze'\n usage = '%prog [OPTIONS]'\n summary = 'Output all currently installed packages (exact versions) to stdout'\n\n def __init__(self):\n super(FreezeCommand, self).__init__()\n self.parser.add_option(\n '-r', '--requirement',\n dest='requirement',\n action='store',\n default=None,\n metavar='FILENAME',\n help='Use the given requirements file as a hint about how to generate the new frozen requirements')\n self.parser.add_option(\n '-f', '--find-links',\n dest='find_links',\n action='append',\n default=[],\n metavar='URL',\n help='URL for finding packages, which will be added to the frozen requirements file')\n self.parser.add_option(\n '-l', '--local',\n dest='local',\n action='store_true',\n default=False,\n help='If in a virtualenv, do not report globally-installed packages')\n\n def setup_logging(self):\n logger.move_stdout_to_stderr()\n\n def run(self, options, args):\n requirement = options.requirement\n find_links = options.find_links or []\n local_only = options.local\n ## FIXME: Obviously this should be settable:\n find_tags = False\n skip_match = None\n\n skip_regex = options.skip_requirements_regex\n if skip_regex:\n skip_match = re.compile(skip_regex)\n\n dependency_links = []\n\n f = sys.stdout\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n f.write('-f %s\\n' % link)\n installations = {}\n for dist in get_installed_distributions(local_only=local_only):\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n installations[req.name] = req\n if requirement:\n req_f = open(requirement)\n for line in req_f:\n if not line.strip() or line.strip().startswith('#'):\n f.write(line)\n continue\n if skip_match and skip_match.search(line):\n f.write(line)\n continue\n elif line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n or line.startswith('--extra-index-url')\n or line.startswith('--find-links')\n or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n line_req = InstallRequirement.from_line(line)\n if not line_req.name:\n logger.notify(\"Skipping line because 
it's not clear what it would install: %s\"\n % line.strip())\n logger.notify(\" (add #egg=PackageName to the URL to avoid this warning)\")\n continue\n if line_req.name not in installations:\n logger.warn(\"Requirement file contains %s, but that package is not installed\"\n % line.strip())\n continue\n f.write(str(installations[line_req.name]))\n del installations[line_req.name]\n f.write('## The following requirements were added by pip --freeze:\\n')\n for installation in sorted(installations.values(), key=lambda x: x.name):\n f.write(str(installation))\n\n\nFreezeCommand()\n", "path": "pip/commands/freeze.py"}]}
| 2,014 | 144 |
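For reference, the pass-through check that the golden diff above extends can be expressed as a standalone predicate. The sketch below is illustrative only: the helper name `is_passthrough_option` is hypothetical and not part of pip's API — pip inlines this logic directly in `freeze.py`.

```python
# Minimal sketch of the extended pass-through check from the patch above.
# `is_passthrough_option` is a hypothetical helper; pip inlines this logic.
PASSTHROUGH_PREFIXES = (
    '-r', '--requirement',
    '-Z', '--always-unzip',
    '-f', '--find-links',
    '-i', '--index-url',
    '--extra-index-url',
)


def is_passthrough_option(line: str) -> bool:
    """Return True for option lines that `pip freeze -r` should copy verbatim."""
    return line.lstrip().startswith(PASSTHROUGH_PREFIXES)


if __name__ == '__main__':
    for sample in ('--find-links http://foo.bar/', 'Django==1.4', '-i https://pypi.org/simple'):
        print(sample, '->', is_passthrough_option(sample))
```

Note that both `'-f'` and `'--find-links'` must be listed: `str.startswith` with a tuple matches any one prefix, and the long option does not begin with the short one.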
gh_patches_debug_11571
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-78
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the package tag-line on PyPi more concrete
Javier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 """A setuptools based setup module.
7
8 See:
9 https://packaging.python.org/guides/distributing-packages-using-setuptools/
10 """
11
12 import os
13 from math import floor
14 import pathlib
15 from random import random
16 from setuptools import setup, find_packages # type: ignore
17
18
19 here = pathlib.Path(__file__).parent.resolve()
20
21 # Get the long description from the README file
22 long_description = (here / 'README.md').read_text(encoding='utf-8')
23
24 version = ''
25
26 # If running from a GitHub Action then a standard set of environment variables will be
27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).
28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.
29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.
30 # Extract this tag and use it as a version string
31 # See also:
32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
33 # https://github.com/pypa/gh-action-pypi-publish
34 GITHUB_REF_TAG_COMMIT = 'refs/tags/'
35
36 github_ref = os.getenv('GITHUB_REF')
37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):]
39
40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.
41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be
42 # '0.1.2.post124'. Although this is discouraged, see:
43 # https://www.python.org/dev/peps/pep-0440/#post-releases
44 # it is necessary here to avoid duplicate packages in Test.PyPI.
45 if not version:
46 # TODO: Replace this with more principled package version management for the package wheels built during local test
47 # runs, one which circumvents AzureML's apparent package caching:
48 build_number = os.getenv('GITHUB_RUN_NUMBER')
49 if build_number:
50 version = '0.1.0.post' + build_number
51 else:
52 default_random_version_number = floor(random() * 10_000_000_000)
53 version = f'0.1.0.post{str(default_random_version_number)}'
54
55 (here / 'latest_version.txt').write_text(version)
56
57 # Read run_requirements.txt to get install_requires
58 install_requires = (here / 'run_requirements.txt').read_text().split("\n")
59 # Remove any whitespace and blank lines
60 install_requires = [line.strip() for line in install_requires if line.strip()]
61
62 setup(
63 name='hi-ml',
64 version=version,
65 description='Microsoft Health Intelligence AzureML helpers',
66 long_description=long_description,
67 long_description_content_type='text/markdown',
68 url='https://github.com/microsoft/hi-ml',
69 author="Microsoft Research Cambridge InnerEye Team ",
70 author_email="[email protected]",
71 classifiers=[
72 'Development Status :: 3 - Alpha',
73 'Intended Audience :: Science/Research',
74 "Topic :: Scientific/Engineering :: Medical Science Apps.",
75 'License :: OSI Approved :: MIT License',
76 'Programming Language :: Python :: 3.7'
77 ],
78 keywords='InnerEye, HealthIntelligence, AzureML',
79 license='MIT License',
80 packages=find_packages(where="src"),
81 package_dir={"": "src"},
82 include_package_data=True,
83 install_requires=install_requires,
84 scripts=['src/health/azure/run_tensorboard.py']
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,10 +59,12 @@
# Remove any whitespace and blank lines
install_requires = [line.strip() for line in install_requires if line.strip()]
+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'
+
setup(
name='hi-ml',
version=version,
- description='Microsoft Health Intelligence AzureML helpers',
+ description=description,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/microsoft/hi-ml',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,10 +59,12 @@\n # Remove any whitespace and blank lines\n install_requires = [line.strip() for line in install_requires if line.strip()]\n \n+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n+\n setup(\n name='hi-ml',\n version=version,\n- description='Microsoft Health Intelligence AzureML helpers',\n+ description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n", "issue": "Make the package tag-line on PyPi more concrete\nJavier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. 
Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\nsetup(\n name='hi-ml',\n version=version,\n description='Microsoft Health Intelligence AzureML helpers',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\ndescription = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n\nsetup(\n name='hi-ml',\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]}
| 1,292 | 141 |
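The patch pulls the tagline into a `description` variable so it can be reviewed in one place. As an illustration, a release pipeline could guard against reintroducing the old generic wording with a check like the one below; this guard is a hypothetical addition, not part of the hi-ml repository.

```python
# Hypothetical release-time guard against reintroducing the generic tagline.
GENERIC_TAGLINE = 'Microsoft Health Intelligence AzureML helpers'


def check_description(description: str) -> None:
    """Raise if the PyPI description is still the generic placeholder."""
    if description.strip() == GENERIC_TAGLINE:
        raise ValueError(
            'setup() description is still the generic tagline; '
            'use a concrete summary of what the package does.'
        )


check_description(
    'Microsoft Health Intelligence package to elevate and monitor scripts '
    'to an AzureML workspace'
)
```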
gh_patches_debug_5409
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-183
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move documentation to separate repo and host it on gh-pages
Sphinx has served us well, but we feel it's too limiting for what we have planned for Ariadne.
We've decided to migrate the site to [Docusaurus](https://docusaurus.io) and keep it in a separate repo.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 # import os
16 # import sys
17 # sys.path.insert(0, os.path.abspath('.'))
18
19 from datetime import date
20
21 year = date.today().year
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "Ariadne"
27 copyright = "%s, Mirumee Software" % year
28 author = "Mirumee Software"
29
30 # The short X.Y version
31 version = "4"
32 # The full version, including alpha/beta/rc tags
33 release = "0.4"
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # If your documentation needs a minimal Sphinx version, state it here.
39 #
40 # needs_sphinx = '1.0'
41
42 # Add any Sphinx extension module names here, as strings. They can be
43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 # ones.
45 extensions = ["sphinxcontrib.asyncio"]
46
47 # Add any paths that contain templates here, relative to this directory.
48 templates_path = ["_templates"]
49
50 # The suffix(es) of source filenames.
51 # You can specify multiple suffix as a list of string:
52 #
53 # source_suffix = ['.rst', '.md']
54 source_suffix = [".rst", ".md"]
55
56 # The master toctree document.
57 master_doc = "index"
58
59 # The language for content autogenerated by Sphinx. Refer to documentation
60 # for a list of supported languages.
61 #
62 # This is also used if you do content translation via gettext catalogs.
63 # Usually you set "language" from the command line for these cases.
64 language = None
65
66 # List of patterns, relative to source directory, that match files and
67 # directories to ignore when looking for source files.
68 # This pattern also affects html_static_path and html_extra_path.
69 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
70
71 # The name of the Pygments (syntax highlighting) style to use.
72 pygments_style = None
73
74
75 # -- Options for HTML output -------------------------------------------------
76
77 # The theme to use for HTML and HTML Help pages. See the documentation for
78 # a list of builtin themes.
79 #
80 html_theme = "alabaster"
81
82 # Theme options are theme-specific and customize the look and feel of a theme
83 # further. For a list of options available for each theme, see the
84 # documentation.
85 #
86 html_theme_options = {
87 "logo": "logo-vertical.png",
88 "github_user": "mirumee",
89 "github_repo": "ariadne",
90 }
91
92 # Add any paths that contain custom static files (such as style sheets) here,
93 # relative to this directory. They are copied after the builtin static files,
94 # so a file named "default.css" will overwrite the builtin "default.css".
95 html_static_path = ["_static"]
96
97 # Custom sidebar templates, must be a dictionary that maps document names
98 # to template names.
99 #
100 # The default sidebars (for documents that don't match any pattern) are
101 # defined by theme itself. Builtin themes are using these templates by
102 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
103 # 'searchbox.html']``.
104 #
105 # html_sidebars = {}
106
107
108 # -- Options for HTMLHelp output ---------------------------------------------
109
110 # Output file base name for HTML help builder.
111 htmlhelp_basename = "Ariadnedoc"
112
113
114 # -- Options for LaTeX output ------------------------------------------------
115
116 latex_elements = {
117 # The paper size ('letterpaper' or 'a4paper').
118 #
119 # 'papersize': 'letterpaper',
120 # The font size ('10pt', '11pt' or '12pt').
121 #
122 # 'pointsize': '10pt',
123 # Additional stuff for the LaTeX preamble.
124 #
125 # 'preamble': '',
126 # Latex figure (float) alignment
127 #
128 # 'figure_align': 'htbp',
129 }
130
131 # Grouping the document tree into LaTeX files. List of tuples
132 # (source start file, target name, title,
133 # author, documentclass [howto, manual, or own class]).
134 latex_documents = [
135 (master_doc, "Ariadne.tex", "Ariadne Documentation", "Mirumee Software", "manual")
136 ]
137
138
139 # -- Options for manual page output ------------------------------------------
140
141 # One entry per manual page. List of tuples
142 # (source start file, name, description, authors, manual section).
143 man_pages = [(master_doc, "ariadne", "Ariadne Documentation", [author], 1)]
144
145
146 # -- Options for Texinfo output ----------------------------------------------
147
148 # Grouping the document tree into Texinfo files. List of tuples
149 # (source start file, target name, title, author,
150 # dir menu entry, description, category)
151 texinfo_documents = [
152 (
153 master_doc,
154 "Ariadne",
155 "Ariadne",
156 author,
157 "Ariadne",
158 "Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.",
159 "Miscellaneous",
160 )
161 ]
162
163
164 # -- Options for Epub output -------------------------------------------------
165
166 # Bibliographic Dublin Core info.
167 epub_title = project
168
169 # The unique identifier of the text. This can be a ISBN number
170 # or the project homepage.
171 #
172 # epub_identifier = ''
173
174 # A unique identification for the text.
175 #
176 # epub_uid = ''
177
178 # A list of files that should not be packed into the epub file.
179 epub_exclude_files = ["search.html"]
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -42,7 +42,7 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ["sphinxcontrib.asyncio"]
+extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,7 +42,7 @@\n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n-extensions = [\"sphinxcontrib.asyncio\"]\n+extensions = []\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "Move documentation to separate repo and host it on gh-pages\nSphinx has served us well, but we fell its too limiting for what we have planned for Ariadne.\r\n\r\nWe've decided to migrate the site to the [Docusaurus](https://docusaurus.io) and keep it on separate repo.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\n\nyear = date.today().year\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Ariadne\"\ncopyright = \"%s, Mirumee Software\" % year\nauthor = \"Mirumee Software\"\n\n# The short X.Y version\nversion = \"4\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.4\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"sphinxcontrib.asyncio\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo\": \"logo-vertical.png\",\n \"github_user\": \"mirumee\",\n \"github_repo\": \"ariadne\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Ariadnedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Ariadne.tex\", \"Ariadne Documentation\", \"Mirumee Software\", \"manual\")\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ariadne\", \"Ariadne Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Ariadne\",\n \"Ariadne\",\n author,\n \"Ariadne\",\n \"Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\n\nyear = date.today().year\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Ariadne\"\ncopyright = \"%s, Mirumee Software\" % year\nauthor = \"Mirumee Software\"\n\n# The short X.Y version\nversion = \"4\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.4\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo\": \"logo-vertical.png\",\n \"github_user\": \"mirumee\",\n \"github_repo\": \"ariadne\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Ariadnedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Ariadne.tex\", \"Ariadne Documentation\", \"Mirumee Software\", \"manual\")\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ariadne\", \"Ariadne Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Ariadne\",\n \"Ariadne\",\n author,\n \"Ariadne\",\n \"Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}]}
| 2,019 | 108 |
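With `extensions = []`, the Sphinx build no longer needs `sphinxcontrib-asyncio`, so the docs requirements can shrink while the Docusaurus migration is in flight. The fragment below shows the minimal configuration the patch leaves behind; it is an excerpt, not the full `conf.py`.

```python
# Excerpt of docs/conf.py after the patch: no third-party Sphinx extensions,
# so sphinxcontrib-asyncio can be dropped from the docs build requirements.
extensions = []  # was: ["sphinxcontrib.asyncio"]

templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
master_doc = "index"
```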
gh_patches_debug_21805
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-4622
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 errors on some of the pages in the HELP menus
Version used
- psychopy: 2021.2.3
https://www.psychopy.org/builder/builder.html
https://www.psychopy.org/api/api.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/urls.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
|
{"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. 
builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]}
| 739 | 285 |
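The trimmed URLs can be smoke-tested with a standard-library link check such as the sketch below. It is a hypothetical test, not part of psychopy; note that some servers reject `HEAD` requests, so a production check might fall back to `GET`.

```python
# Hypothetical smoke test: report help URLs that no longer resolve.
import urllib.request
from urllib.error import HTTPError, URLError


def broken_urls(urls: dict) -> list:
    failures = []
    for key, url in urls.items():
        if not url.startswith('http'):
            continue
        try:
            request = urllib.request.Request(url, method='HEAD')
            urllib.request.urlopen(request, timeout=10)
        except (HTTPError, URLError) as exc:
            failures.append((key, url, exc))
    return failures


if __name__ == '__main__':
    from psychopy.app.urls import urls  # assumes psychopy is installed
    for key, url, exc in broken_urls(urls):
        print(f'{key}: {url} -> {exc}')
```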
gh_patches_debug_30758
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-1061
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Example PickleWriterDriver does not support URIs
### Expected behaviour
Using the example `pickles` driver should make it possible to run, e.g., `integration_tests/test_end_to_end` successfully. This assumes the example dcio driver has been installed first.
### Actual behaviour
The test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()`, which still expects a filepath string. Also, this URI may contain intermediate directories that need creating.
### Steps to reproduce the behaviour
- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`
- Run `pytest integration_tests/test_end_to_end.py`
- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.
### Environment information
- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`
- Docker image: `opendatacube/datacube-tests:latest`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/io_plugin/dcio_example/pickles.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """ Example reader plugin
6 """
7 from contextlib import contextmanager
8 import pickle
9
10
11 PROTOCOL = 'file'
12 FORMAT = 'pickle'
13
14
15 def uri_split(uri):
16 loc = uri.find('://')
17 if loc < 0:
18 return uri, PROTOCOL
19 return uri[loc+3:], uri[:loc]
20
21
22 class PickleDataSource(object):
23 class BandDataSource(object):
24 def __init__(self, da):
25 self._da = da
26 self.nodata = da.nodata
27
28 @property
29 def crs(self):
30 return self._da.crs
31
32 @property
33 def transform(self):
34 return self._da.affine
35
36 @property
37 def dtype(self):
38 return self._da.dtype
39
40 @property
41 def shape(self):
42 return self._da.shape
43
44 def read(self, window=None, out_shape=None):
45 if window is None:
46 data = self._da.values
47 else:
48 rows, cols = [slice(*w) for w in window]
49 data = self._da.values[rows, cols]
50
51 if out_shape is None or out_shape == data.shape:
52 return data
53
54 raise NotImplementedError('Native reading not supported for this data source')
55
56 def __init__(self, band):
57 self._band = band
58 uri = band.uri
59 self._filename, protocol = uri_split(uri)
60
61 if protocol not in [PROTOCOL, 'pickle']:
62 raise ValueError('Expected file:// or pickle:// url')
63
64 @contextmanager
65 def open(self):
66 with open(self._filename, 'rb') as f:
67 ds = pickle.load(f)
68
69 yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))
70
71
72 class PickleReaderDriver(object):
73 def __init__(self):
74 self.name = 'PickleReader'
75 self.protocols = [PROTOCOL, 'pickle']
76 self.formats = [FORMAT]
77
78 def supports(self, protocol, fmt):
79 return (protocol in self.protocols and
80 fmt in self.formats)
81
82 def new_datasource(self, band):
83 return PickleDataSource(band)
84
85
86 def rdr_driver_init():
87 return PickleReaderDriver()
88
89
90 class PickleWriterDriver(object):
91 def __init__(self):
92 pass
93
94 @property
95 def aliases(self):
96 return ['pickles']
97
98 @property
99 def format(self):
100 return FORMAT
101
102 @property
103 def uri_scheme(self):
104 return PROTOCOL
105
106 def write_dataset_to_storage(self, dataset, filename,
107 global_attributes=None,
108 variable_params=None,
109 storage_config=None,
110 **kwargs):
111 with open(filename, 'wb') as f:
112 pickle.dump(dataset, f)
113 return {}
114
115
116 def writer_driver_init():
117 return PickleWriterDriver()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py
--- a/examples/io_plugin/dcio_example/pickles.py
+++ b/examples/io_plugin/dcio_example/pickles.py
@@ -4,9 +4,12 @@
# SPDX-License-Identifier: Apache-2.0
""" Example reader plugin
"""
-from contextlib import contextmanager
import pickle
+from contextlib import contextmanager
+from pathlib import Path
+from urllib.parse import urlsplit
+from datacube.utils.uris import normalise_path
PROTOCOL = 'file'
FORMAT = 'pickle'
@@ -103,12 +106,33 @@
def uri_scheme(self):
return PROTOCOL
- def write_dataset_to_storage(self, dataset, filename,
+ def mk_uri(self, file_path, storage_config):
+ """
+ Constructs a URI from the file_path and storage config.
+
+ A typical implementation should return f'{scheme}://{file_path}'
+
+ Example:
+ file_path = '/path/to/my_file.pickled'
+ storage_config = {'driver': 'pickles'}
+
+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'
+
+ :param Path file_path: The file path of the file to be converted into a URI.
+ :param dict storage_config: The dict holding the storage config found in the ingest definition.
+ :return: file_path as a URI that the Driver understands.
+ :rtype: str
+ """
+ return normalise_path(file_path).as_uri()
+
+ def write_dataset_to_storage(self, dataset, file_uri,
global_attributes=None,
variable_params=None,
storage_config=None,
**kwargs):
- with open(filename, 'wb') as f:
+ filepath = Path(urlsplit(file_uri).path)
+ filepath.parent.mkdir(parents=True, exist_ok=True)
+ with filepath.open('wb') as f:
pickle.dump(dataset, f)
return {}
|
{"golden_diff": "diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py\n--- a/examples/io_plugin/dcio_example/pickles.py\n+++ b/examples/io_plugin/dcio_example/pickles.py\n@@ -4,9 +4,12 @@\n # SPDX-License-Identifier: Apache-2.0\n \"\"\" Example reader plugin\n \"\"\"\n-from contextlib import contextmanager\n import pickle\n+from contextlib import contextmanager\n+from pathlib import Path\n+from urllib.parse import urlsplit\n \n+from datacube.utils.uris import normalise_path\n \n PROTOCOL = 'file'\n FORMAT = 'pickle'\n@@ -103,12 +106,33 @@\n def uri_scheme(self):\n return PROTOCOL\n \n- def write_dataset_to_storage(self, dataset, filename,\n+ def mk_uri(self, file_path, storage_config):\n+ \"\"\"\n+ Constructs a URI from the file_path and storage config.\n+\n+ A typical implementation should return f'{scheme}://{file_path}'\n+\n+ Example:\n+ file_path = '/path/to/my_file.pickled'\n+ storage_config = {'driver': 'pickles'}\n+\n+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n+\n+ :param Path file_path: The file path of the file to be converted into a URI.\n+ :param dict storage_config: The dict holding the storage config found in the ingest definition.\n+ :return: file_path as a URI that the Driver understands.\n+ :rtype: str\n+ \"\"\"\n+ return normalise_path(file_path).as_uri()\n+\n+ def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n- with open(filename, 'wb') as f:\n+ filepath = Path(urlsplit(file_uri).path)\n+ filepath.parent.mkdir(parents=True, exist_ok=True)\n+ with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n", "issue": "Example PickleWriterDriver does not support URIs\n### Expected behaviour\r\nUsing the example `pickles` driver should allow to successfully run, e.g., `integration_tests/test_end_to_end`. This is assuming the example dcio driver has been installed first.\r\n\r\n### Actual behaviour\r\nThe test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. 
Also, this URI may contain intermediate directories that need creating.\r\n\r\n### Steps to reproduce the behaviour\r\n- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`\r\n- Run `pytest integration_tests/test_end_to_end.py`\r\n- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.\r\n\r\n### Environment information\r\n- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`\r\n- Docker image: `opendatacube/datacube-tests:latest`\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nfrom contextlib import contextmanager\nimport pickle\n\n\nPROTOCOL = 'file'\nFORMAT = 'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def write_dataset_to_storage(self, dataset, filename,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nimport pickle\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom urllib.parse import urlsplit\n\nfrom datacube.utils.uris import normalise_path\n\nPROTOCOL = 'file'\nFORMAT = 
'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def mk_uri(self, file_path, storage_config):\n \"\"\"\n Constructs a URI from the file_path and storage config.\n\n A typical implementation should return f'{scheme}://{file_path}'\n\n Example:\n file_path = '/path/to/my_file.pickled'\n storage_config = {'driver': 'pickles'}\n\n mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n\n :param Path file_path: The file path of the file to be converted into a URI.\n :param dict storage_config: The dict holding the storage config found in the ingest definition.\n :return: file_path as a URI that the Driver understands.\n :rtype: str\n \"\"\"\n return normalise_path(file_path).as_uri()\n\n def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n filepath = Path(urlsplit(file_uri).path)\n filepath.parent.mkdir(parents=True, exist_ok=True)\n with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}]}
| 1,419 | 444 |
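
A stand-alone sketch of the URI round trip introduced by the patch above: build a `file://` URI with `mk_uri()`, then on write recover the filesystem path from that URI and create any missing parent directories. It substitutes `pathlib.Path.resolve().as_uri()` for datacube's `normalise_path()`, an assumption that holds for ordinary POSIX-style local paths but not for the remote schemes datacube also supports.

```python
import pickle
from pathlib import Path
from urllib.parse import urlsplit


def mk_uri(file_path):
    # Turn a local path into a file:// URI; resolve() makes it absolute,
    # which as_uri() requires. (normalise_path() in datacube does more.)
    return Path(file_path).resolve().as_uri()


def write_dataset_to_storage(dataset, file_uri):
    # Recover the filesystem path from the URI and create intermediate
    # directories before writing, mirroring the patched driver.
    filepath = Path(urlsplit(file_uri).path)
    filepath.parent.mkdir(parents=True, exist_ok=True)
    with filepath.open("wb") as f:
        pickle.dump(dataset, f)
    return {}


if __name__ == "__main__":
    uri = mk_uri("out/nested/ds.pickled")  # hypothetical output location
    write_dataset_to_storage({"x": 1}, uri)
    print(uri)
```
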
gh_patches_debug_16915
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5322
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add unit test for Event helpers
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
We should add unit tests for event helpers.
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
Parent: #5320
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/errors.py`
Content:
```
1 import json
2
3 from flask import make_response
4 from flask_rest_jsonapi.errors import jsonapi_errors
5
6
7 class ErrorResponse:
8 """
9 Parent ErrorResponse class for handling json-api compliant errors.
10 Inspired by the JsonApiException class of `flask-rest-jsonapi` itself
11 """
12 title = 'Unknown error'
13 status = 500
14 headers = {'Content-Type': 'application/vnd.api+json'}
15
16 def __init__(self, source, detail, title=None, status=None):
17 """Initialize a jsonapi ErrorResponse Object
18
19 :param dict source: the source of the error
20 :param str detail: the detail of the error
21 """
22 self.source = source
23 self.detail = detail
24 if title is not None:
25 self.title = title
26 if status is not None:
27 self.status = status
28
29 def respond(self):
30 """
31 :return: a jsonapi compliant response object
32 """
33 dict_ = self.to_dict()
34 return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)
35
36 def to_dict(self):
37 """
38 :return: Dict from details of the object
39 """
40 return {'status': self.status,
41 'source': self.source,
42 'title': self.title,
43 'detail': self.detail}
44
45
46 class ForbiddenError(ErrorResponse):
47 """
48 Default class for 403 Error
49 """
50 title = 'Access Forbidden'
51 status = 403
52
53
54 class NotFoundError(ErrorResponse):
55 """
56 Default class for 404 Error
57 """
58 title = 'Not Found'
59 status = 404
60
61
62 class ServerError(ErrorResponse):
63 status = 500
64 title = 'Internal Server Error'
65
66
67 class UnprocessableEntityError(ErrorResponse):
68 status = 422
69 title = 'Unprocessable Entity'
70
71
72 class BadRequestError(ErrorResponse):
73 status = 400
74 title = 'Bad Request'
75
```
Path: `app/api/helpers/exceptions.py`
Content:
```
1 from flask_rest_jsonapi.exceptions import JsonApiException
2
3
4 class UnprocessableEntity(JsonApiException):
5 title = "Unprocessable Entity"
6 status = 422
7
8
9 class ConflictException(JsonApiException):
10 title = "Conflict"
11 status = 409
12
13
14 class ForbiddenException(JsonApiException):
15 """
16 Default class for 403 Error
17 """
18 title = 'Access Forbidden'
19 status = 403
20
21
22 class MethodNotAllowed(JsonApiException):
23 """
24 Default Class to throw HTTP 405 Exception
25 """
26 title = "Method Not Allowed"
27 status = 405
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py
--- a/app/api/helpers/errors.py
+++ b/app/api/helpers/errors.py
@@ -65,10 +65,16 @@
class UnprocessableEntityError(ErrorResponse):
+ """
+ Default class for 422 Error
+ """
status = 422
title = 'Unprocessable Entity'
class BadRequestError(ErrorResponse):
+ """
+ Default class for 400 Error
+ """
status = 400
title = 'Bad Request'
diff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py
--- a/app/api/helpers/exceptions.py
+++ b/app/api/helpers/exceptions.py
@@ -2,11 +2,17 @@
class UnprocessableEntity(JsonApiException):
+ """
+ Default class for 422 Error
+ """
title = "Unprocessable Entity"
status = 422
class ConflictException(JsonApiException):
+ """
+ Default class for 409 Error
+ """
title = "Conflict"
status = 409
|
{"golden_diff": "diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py\n--- a/app/api/helpers/errors.py\n+++ b/app/api/helpers/errors.py\n@@ -65,10 +65,16 @@\n \n \n class UnprocessableEntityError(ErrorResponse):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n \n \n class BadRequestError(ErrorResponse):\n+ \"\"\"\n+ Default class for 400 Error\n+ \"\"\"\n status = 400\n title = 'Bad Request'\ndiff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py\n--- a/app/api/helpers/exceptions.py\n+++ b/app/api/helpers/exceptions.py\n@@ -2,11 +2,17 @@\n \n \n class UnprocessableEntity(JsonApiException):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n \n \n class ConflictException(JsonApiException):\n+ \"\"\"\n+ Default class for 409 Error\n+ \"\"\"\n title = \"Conflict\"\n status = 409\n", "issue": "Add unit test for Event helpers\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nWe should add unit tests for event helpers.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nParent: #5320 \n", "before_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 
Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}], "after_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n \"\"\"\n Default class for 422 Error\n \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n \"\"\"\n Default class for 400 Error\n \"\"\"\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n \"\"\"\n Default class for 422 Error\n \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n \"\"\"\n Default class for 409 Error\n \"\"\"\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}]}
| 1,106 | 256 |
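
Since the issue asks for unit tests rather than the docstrings the patch adds, here is a sketch of one such test. It assumes the `app.api.helpers.errors` import path from the repository layout shown above; `to_dict()` needs no Flask application context, and the pointer and detail values are invented for illustration.

```python
import unittest

from app.api.helpers.errors import UnprocessableEntityError  # assumed path


class TestUnprocessableEntityError(unittest.TestCase):
    def test_to_dict(self):
        error = UnprocessableEntityError({"pointer": "/data"}, "invalid data")
        # to_dict() should combine the class defaults with the arguments.
        self.assertEqual(
            error.to_dict(),
            {
                "status": 422,
                "source": {"pointer": "/data"},
                "title": "Unprocessable Entity",
                "detail": "invalid data",
            },
        )


if __name__ == "__main__":
    unittest.main()
```
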
gh_patches_debug_15280
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-891
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change name of chat room from Matrix/Riot to Element
The name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/particles/isotopes.py`
Content:
```
1 """
2 Module for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.
3
4 .. attention::
5 This module only contains non-public functionality. To learn more about the
6     package functionality, examine the code itself.
7 """
8 __all__ = []
9
10 import astropy.units as u
11 import json
12 import pkgutil
13
14 # this code was used to create the JSON file as per vn-ki on Riot:
15 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
16 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
17 #
18 # def _isotope_default(obj):
19 # if isinstance(obj, u.Quantity):
20 # return {
21 # "unit": obj.unit.name,
22 # "value": obj.value,
23 # }
24 # with open("isotopes.json", "w") as f:
25 # json.dump(_Isotopes, f, default=plasma_default, indent=2)
26
27
28 def _isotope_obj_hook(obj):
29 """An `object_hook` designed for `json.load` and `json.loads`."""
30 if "unit" in obj:
31 return obj["value"] * u.Unit(obj["unit"])
32 return obj
33
34
35 #: Dictionary of isotope data.
36 _Isotopes = json.loads(
37 pkgutil.get_data("plasmapy", "particles/data/isotopes.json"),
38 object_hook=_isotope_obj_hook,
39 )
40
```
Path: `plasmapy/particles/elements.py`
Content:
```
1 """
2 Module for loading atomic data for elements from
3 :file:`plasmapy/particles/data/elements.json`.
4
5 The periodic table data is from: http://periodic.lanl.gov/index.shtml
6
7 .. attention::
8 This module only contains non-public functionality. To learn more about the
9 package functionality, examine the code itself.
10 """
11 __all__ = []
12
13 import astropy.units as u
14 import collections
15 import json
16 import pkgutil
17
18 _PeriodicTable = collections.namedtuple(
19 "periodic_table", ["group", "category", "block", "period"]
20 )
21
22
23 def _element_obj_hook(obj):
24 if "unit" in obj:
25 return obj["value"] * u.Unit(obj["unit"])
26 return obj
27
28
29 # this code was used to create the JSON file as per vn-ki on Riot:
30 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
31 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
32 #
33 # def plasma_default(obj):
34 # if isinstance(obj, u.Quantity):
35 # return {
36 # "unit": obj.unit.name,
37 # "value": obj.value,
38 # }
39 #
40 # with open("elements.json", "w") as f:
41 # json.dump(_Elements, f, default=plasma_default, indent=2)
42
43
44 _Elements = json.loads(
45 pkgutil.get_data("plasmapy", "particles/data/elements.json"),
46 object_hook=_element_obj_hook,
47 )
48
49
50 _atomic_numbers_to_symbols = {
51 elemdict["atomic number"]: symb for (symb, elemdict) in _Elements.items()
52 }
53
54 _element_names_to_symbols = {
55 elemdict["element name"]: symb for (symb, elemdict) in _Elements.items()
56 }
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py
--- a/plasmapy/particles/elements.py
+++ b/plasmapy/particles/elements.py
@@ -26,7 +26,7 @@
return obj
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
diff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py
--- a/plasmapy/particles/isotopes.py
+++ b/plasmapy/particles/isotopes.py
@@ -11,7 +11,7 @@
import json
import pkgutil
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
|
{"golden_diff": "diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py\n--- a/plasmapy/particles/elements.py\n+++ b/plasmapy/particles/elements.py\n@@ -26,7 +26,7 @@\n return obj\n \n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\ndiff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py\n--- a/plasmapy/particles/isotopes.py\n+++ b/plasmapy/particles/isotopes.py\n@@ -11,7 +11,7 @@\n import json\n import pkgutil\n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\n", "issue": "Change name of chat room from Matrix/Riot to Element\nThe name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.\n", "before_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}], "after_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Matrix:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Matrix:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}]}
| 1,274 | 336 |
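
The commented-out serializer and the object hooks in the files above implement one small pattern: astropy quantities round-trip through JSON as `{"unit": ..., "value": ...}` objects. Below is a self-contained sketch of that pattern (separate from the chat-room rename the issue is about), assuming astropy is installed.

```python
import json

import astropy.units as u


def _default(obj):
    # Serializer half of the pattern: encode a Quantity as a plain dict.
    if isinstance(obj, u.Quantity):
        return {"unit": obj.unit.name, "value": obj.value}
    raise TypeError(f"not JSON serializable: {obj!r}")


def _obj_hook(obj):
    # Loader half: any dict carrying a "unit" key becomes a Quantity again.
    if "unit" in obj:
        return obj["value"] * u.Unit(obj["unit"])
    return obj


text = json.dumps({"H": {"mass": 1.008 * u.u}}, default=_default)
data = json.loads(text, object_hook=_obj_hook)
print(data["H"]["mass"])  # -> 1.008 u
```
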
gh_patches_debug_2500
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-692
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Intel-optimized version of the package
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4 # Required dependencies
5 required = [
6 # Please keep alphabetized
7 'akro',
8 'boto3',
9 'cached_property',
10 'click',
11 'cloudpickle',
12 'cma==1.1.06',
13 # dm_control throws an error during install about not being able to
14 # find a build dependency (absl-py). Later pip executes the `install`
15 # command again and the install succeeds because absl-py has been
16 # installed. This is stupid, but harmless.
17 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501
18 'dowel==0.0.2',
19 'gym[all]==0.10.11',
20 'joblib<0.13,>=0.12',
21 'matplotlib',
22 'mujoco-py<1.50.2,>=1.50.1',
23 'numpy==1.14.5',
24 'psutil',
25 'pyprind',
26 'python-dateutil',
27 'scikit-image',
28 'scipy',
29 'tensorflow<1.13,>=1.12.0',
30 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12
31 ]
32
33 # Dependencies for optional features
34 extras = {}
35 extras['all'] = list(set(sum(extras.values(), [])))
36
37 # Development dependencies (*not* included in "all")
38 extras['dev'] = [
39 # Please keep alphabetized
40 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501
41 'coverage',
42 'flake8',
43 'flake8-docstrings==1.3.0',
44 'flake8-import-order',
45 'nose2',
46 'pandas',
47 'pep8-naming==0.7.0',
48 'pre-commit',
49 'pylint==1.9.2',
50 'sphinx',
51 'sphinx_rtd_theme',
52 'yapf',
53 ]
54
55 with open('README.md') as f:
56 readme = f.read()
57
58 # Get the package version dynamically
59 with open('VERSION') as v:
60 version = v.read().strip()
61
62 setup(
63 name='garage',
64 version=version,
65 author='Reinforcement Learning Working Group',
66 description='A framework for reproducible reinforcement learning research',
67 url='https://github.com/rlworkgroup/garage',
68 packages=find_packages(where='src'),
69 package_dir={'': 'src'},
70 scripts=['scripts/garage'],
71 install_requires=required,
72 extras_require=extras,
73 license='MIT',
74 long_description=readme,
75 long_description_content_type='text/markdown',
76 classifiers=[
77 'Development Status :: 4 - Beta',
78 'Intended Audience :: Developers',
79 'Intended Audience :: Education',
80 'Intended Audience :: Science/Research',
81 'License :: OSI Approved :: MIT License',
82 'Programming Language :: Python :: 3.5',
83 'Programming Language :: Python :: 3.6',
84 'Programming Language :: Python :: 3.7',
85 'Programming Language :: Python :: 3 :: Only',
86 'Topic :: Scientific/Engineering :: Artificial Intelligence',
87 'Topic :: Scientific/Engineering :: Mathematics',
88 'Topic :: Software Development :: Libraries',
89 ],
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,9 @@
extras = {}
extras['all'] = list(set(sum(extras.values(), [])))
+# Intel dependencies not included in all
+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']
+
# Development dependencies (*not* included in "all")
extras['dev'] = [
# Please keep alphabetized
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,9 @@\n extras = {}\n extras['all'] = list(set(sum(extras.values(), [])))\n \n+# Intel dependencies not included in all\n+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n+\n # Development dependencies (*not* included in \"all\")\n extras['dev'] = [\n # Please keep alphabetized\n", "issue": "Add Intel-optimized version of the package\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). 
Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Intel dependencies not included in all\nextras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
| 1,265 | 108 |
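
For context on how the new `intel` extra is consumed: setuptools extras are opt-in dependency groups selected at install time, e.g. `pip install garage[intel]`. A minimal, hypothetical `setup.py` with the same shape follows; note that, as in the patched file, `all` is computed before the later groups are added, so it stays empty and deliberately excludes them.

```python
from setuptools import setup

extras = {}
extras["all"] = list(set(sum(extras.values(), [])))  # built first, so empty

# Added after "all": installed only when asked for explicitly,
# e.g. `pip install example-pkg[intel]`.
extras["intel"] = ["intel-tensorflow<1.13,>=1.12.0"]

setup(
    name="example-pkg",  # hypothetical package name
    version="0.0.1",
    extras_require=extras,
)
```
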
gh_patches_debug_16985
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2369
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double bangs without query redirect to search URL instead of main page
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
`2023.4.21+98387e29`
**How did you install SearXNG?**
`searxng-docker`
**What happened?**
Double bangs without query redirect to search URL instead of main page
**How To Reproduce**
Use external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=`
**Expected behavior**
`!!gh` should redirect to `https://github.com` if no search items
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/external_bang.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2
3 from urllib.parse import quote_plus
4 from searx.data import EXTERNAL_BANGS
5
6 LEAF_KEY = chr(16)
7
8
9 def get_node(external_bangs_db, bang):
10 node = external_bangs_db['trie']
11 after = ''
12 before = ''
13 for bang_letter in bang:
14 after += bang_letter
15 if after in node and isinstance(node, dict):
16 node = node[after]
17 before += after
18 after = ''
19 return node, before, after
20
21
22 def get_bang_definition_and_ac(external_bangs_db, bang):
23 node, before, after = get_node(external_bangs_db, bang)
24
25 bang_definition = None
26 bang_ac_list = []
27 if after != '':
28 for k in node:
29 if k.startswith(after):
30 bang_ac_list.append(before + k)
31 elif isinstance(node, dict):
32 bang_definition = node.get(LEAF_KEY)
33 bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]
34 elif isinstance(node, str):
35 bang_definition = node
36 bang_ac_list = []
37
38 return bang_definition, bang_ac_list
39
40
41 def resolve_bang_definition(bang_definition, query):
42 url, rank = bang_definition.split(chr(1))
43 url = url.replace(chr(2), quote_plus(query))
44 if url.startswith('//'):
45 url = 'https:' + url
46 rank = int(rank) if len(rank) > 0 else 0
47 return (url, rank)
48
49
50 def get_bang_definition_and_autocomplete(bang, external_bangs_db=None):
51 if external_bangs_db is None:
52 external_bangs_db = EXTERNAL_BANGS
53
54 bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)
55
56 new_autocomplete = []
57 current = [*bang_ac_list]
58 done = set()
59 while len(current) > 0:
60 bang_ac = current.pop(0)
61 done.add(bang_ac)
62
63 current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)
64 if current_bang_definition:
65 _, order = resolve_bang_definition(current_bang_definition, '')
66 new_autocomplete.append((bang_ac, order))
67 for new_bang in current_bang_ac_list:
68 if new_bang not in done and new_bang not in current:
69 current.append(new_bang)
70
71 new_autocomplete.sort(key=lambda t: (-t[1], t[0]))
72 new_autocomplete = list(map(lambda t: t[0], new_autocomplete))
73
74 return bang_definition, new_autocomplete
75
76
77 def get_bang_url(search_query, external_bangs_db=None):
78 """
79 Redirects if the user supplied a correct bang search.
80 :param search_query: This is a search_query object which contains preferences and the submitted queries.
81 :return: None if the bang was invalid, else a string of the redirect url.
82 """
83 ret_val = None
84
85 if external_bangs_db is None:
86 external_bangs_db = EXTERNAL_BANGS
87
88 if search_query.external_bang:
89 bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)
90 if bang_definition and isinstance(bang_definition, str):
91 ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]
92
93 return ret_val
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/external_bang.py b/searx/external_bang.py
--- a/searx/external_bang.py
+++ b/searx/external_bang.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-from urllib.parse import quote_plus
+from urllib.parse import quote_plus, urlparse
from searx.data import EXTERNAL_BANGS
LEAF_KEY = chr(16)
@@ -40,9 +40,15 @@
def resolve_bang_definition(bang_definition, query):
url, rank = bang_definition.split(chr(1))
- url = url.replace(chr(2), quote_plus(query))
if url.startswith('//'):
url = 'https:' + url
+ if query:
+ url = url.replace(chr(2), quote_plus(query))
+ else:
+ # go to main instead of search page
+ o = urlparse(url)
+ url = o.scheme + '://' + o.netloc
+
rank = int(rank) if len(rank) > 0 else 0
return (url, rank)
|
{"golden_diff": "diff --git a/searx/external_bang.py b/searx/external_bang.py\n--- a/searx/external_bang.py\n+++ b/searx/external_bang.py\n@@ -1,6 +1,6 @@\n # SPDX-License-Identifier: AGPL-3.0-or-later\n \n-from urllib.parse import quote_plus\n+from urllib.parse import quote_plus, urlparse\n from searx.data import EXTERNAL_BANGS\n \n LEAF_KEY = chr(16)\n@@ -40,9 +40,15 @@\n \n def resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n- url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n+ if query:\n+ url = url.replace(chr(2), quote_plus(query))\n+ else:\n+ # go to main instead of search page\n+ o = urlparse(url)\n+ url = o.scheme + '://' + o.netloc\n+\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n", "issue": "Double bangs without query redirect to search URL instead of main page\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n`2023.4.21+98387e29`\r\n\r\n**How did you install SearXNG?**\r\n`searxng-docker`\r\n\r\n**What happened?**\r\nDouble bangs without query redirect to search URL instead of main page\r\n\r\n**How To Reproduce**\r\nUse external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=` \r\n\r\n**Expected behavior**\r\n`!!gh` should redirect to `https://github.com` if no search items\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], 
t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus, urlparse\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n if url.startswith('//'):\n url = 'https:' + url\n if query:\n url = url.replace(chr(2), quote_plus(query))\n else:\n # go to main instead of search page\n o = urlparse(url)\n url = o.scheme + '://' + o.netloc\n\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if 
search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}]}
| 1,395 | 253 |
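
To see the patched behaviour end to end, the function from the golden diff above can be run stand-alone. Bang definitions pack a URL template and a rank separated by control characters, with `chr(2)` marking where the query belongs; an empty query now collapses the URL to scheme plus host.

```python
from urllib.parse import quote_plus, urlparse


def resolve_bang_definition(bang_definition, query):
    url, rank = bang_definition.split(chr(1))
    if url.startswith("//"):
        url = "https:" + url
    if query:
        url = url.replace(chr(2), quote_plus(query))
    else:
        # No query: go to the main page instead of an empty search.
        o = urlparse(url)
        url = f"{o.scheme}://{o.netloc}"
    return url, int(rank) if rank else 0


bang = "//github.com/search?utf8=%E2%9C%93&q=" + chr(2) + chr(1) + "0"
print(resolve_bang_definition(bang, "streamlink"))  # search URL with query
print(resolve_bang_definition(bang, ""))            # ('https://github.com', 0)
```
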
gh_patches_debug_2243
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5023
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.vtvgo: '403 Client Error: Forbidden for url: ...'
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Last month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.
### Debug log
```text
streamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug
[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 5.1.2+4.g68dad105
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.9.24
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] urllib3: 1.26.12
[cli][debug] websocket-client: 1.4.1
[cli][debug] importlib-metadata: 4.6.4
[cli][debug] Arguments:
[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
error: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vtvgo.py`
Content:
```
1 """
2 $description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.
3 $url vtvgo.vn
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r"https?://vtvgo\.vn/xem-truc-tuyen-kenh-"
19 ))
20 class VTVgo(Plugin):
21 AJAX_URL = "https://vtvgo.vn/ajax-get-stream"
22
23 def _get_streams(self):
24 # get cookies
25 self.session.http.get("https://vtvgo.vn/")
26
27 self.session.http.headers.update({
28 "Origin": "https://vtvgo.vn",
29 "Referer": self.url,
30 "X-Requested-With": "XMLHttpRequest",
31 })
32
33 params = self.session.http.get(self.url, schema=validate.Schema(
34 validate.parse_html(),
35 validate.xml_xpath_string(".//script[contains(text(),'setplayer(')][1]/text()"),
36 validate.none_or_all(
37 validate.regex(
38 re.compile(r"""var\s+(?P<key>(?:type_)?id|time|token)\s*=\s*["']?(?P<value>[^"']+)["']?;"""),
39 method="findall",
40 ),
41 [
42 ("id", int),
43 ("type_id", str),
44 ("time", str),
45 ("token", str),
46 ],
47 ),
48 ))
49 if not params:
50 return
51
52 log.trace(f"{params!r}")
53 hls_url = self.session.http.post(
54 self.AJAX_URL,
55 data=dict(params),
56 schema=validate.Schema(
57 validate.parse_json(),
58 {"stream_url": [validate.url()]},
59 validate.get(("stream_url", 0)),
60 ),
61 )
62
63 return HLSStream.parse_variant_playlist(self.session, hls_url)
64
65
66 __plugin__ = VTVgo
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py
--- a/src/streamlink/plugins/vtvgo.py
+++ b/src/streamlink/plugins/vtvgo.py
@@ -27,6 +27,7 @@
self.session.http.headers.update({
"Origin": "https://vtvgo.vn",
"Referer": self.url,
+ "Sec-Fetch-Site": "same-origin",
"X-Requested-With": "XMLHttpRequest",
})
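
A standalone way to sanity-check this fix outside Streamlink is a short `requests` session. The sketch below is illustrative only: it assumes the vtvgo.vn endpoint still behaves as described in the issue, and the `params` dict is a placeholder for the `id`/`type_id`/`time`/`token` values that the real plugin scrapes from the channel page.

```python
# Hedged sketch, not part of the plugin: reproduce the 403 and the fix with
# plain requests. The params payload below is a placeholder, not real data.
import requests

session = requests.Session()
session.get("https://vtvgo.vn/")  # collect the cookies the site now requires

session.headers.update({
    "Origin": "https://vtvgo.vn",
    "Referer": "https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html",
    "Sec-Fetch-Site": "same-origin",  # the header the patch adds
    "X-Requested-With": "XMLHttpRequest",
})

params = {"id": 3, "type_id": "0", "time": "0", "token": "placeholder"}
response = session.post("https://vtvgo.vn/ajax-get-stream", data=params)
print(response.status_code)  # 403 without Sec-Fetch-Site, 200 with it
```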
|
{"golden_diff": "diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py\n--- a/src/streamlink/plugins/vtvgo.py\n+++ b/src/streamlink/plugins/vtvgo.py\n@@ -27,6 +27,7 @@\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n+ \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n", "issue": "plugins.vtvgo: '403 Client Error: Forbidden for url: ...'\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nLast month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.\n\n### Debug log\n\n```text\nstreamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug\r\n[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 5.1.2+4.g68dad105\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.9.24\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] importlib-metadata: 4.6.4\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\nerror: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n 
data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}]}
| 1,429 | 108 |
gh_patches_debug_7931 | rasdani/github-patches | git_diff | conan-io__conan-center-index-14099 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[service] msbuild package template has import incompatible with Conan 2.0
At https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:
```
ERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py", line 8, in <module>
from conan.tools.layout import vs_layout
ImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)
```
Therefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/package_templates/msbuild_package/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
4 from conan.tools.layout import vs_layout
5 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
6 import os
7
8
9 required_conan_version = ">=1.53.0"
10
11
12 class PackageConan(ConanFile):
13 name = "package"
14 description = "short description"
15 # Use short name only, conform to SPDX License List: https://spdx.org/licenses/
16 # In case not listed there, use "LicenseRef-<license-file-name>"
17 license = ""
18 url = "https://github.com/conan-io/conan-center-index"
19 homepage = "https://github.com/project/package"
20 # no "conan" and project name in topics. Use topics from the upstream listed on GH
21 topics = ("topic1", "topic2", "topic3")
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 # no exports_sources attribute, but export_sources(self) method instead
33 # this allows finer grain exportation of patches per version
34 def export_sources(self):
35 export_conandata_patches(self)
36
37 def config_options(self):
38 if self.settings.os == "Windows":
39 del self.options.fPIC
40
41 def configure(self):
42 if self.options.shared:
43 self.options.rm_safe("fPIC")
44 # for plain C projects only
45 self.settings.rm_safe("compiler.libcxx")
46 self.settings.rm_safe("compiler.cppstd")
47
48 def layout(self):
49 vs_layout(self)
50
51 def requirements(self):
52 # prefer self.requires method instead of requires attribute
53 self.requires("dependency/0.8.1")
54
55 def validate(self):
56 # in case it does not work in another configuration, it should validated here too
57 if not is_msvc(self):
58 raise ConanInvalidConfiguration(f"{self.ref} can be built only by Visual Studio and msvc.")
59
60 # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)
61 def build_requirements(self):
62 self.tool_requires("tool/x.y.z")
63
64 def source(self):
65 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
66
67 def generate(self):
68 tc = MSBuildToolchain(self)
69 tc.generate()
70 tc = MSBuildDeps(self)
71 tc.generate()
72 tc = VCVars(self)
73 tc.generate()
74
75 def _patch_sources(self):
76 apply_conandata_patches(self)
77 # remove bundled xxhash
78 rm(self, "whateer.*", os.path.join(self.source_folder, "lib"))
79 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "...", "")
80
81 def build(self):
82 self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed
83 msbuild = MSBuild(self)
84 # customize to Release when RelWithDebInfo
85 msbuild.build_type = "Debug" if self.settings.build_type == "Debug" else "Release"
86 # use Win32 instead of the default value when building x86
87 msbuild.platform = "Win32" if self.settings.arch == "x86" else msbuild.platform
88 # customize according the solution file and compiler version
89 msbuild.build(sln="project_2017.sln")
90
91 def package(self):
92 copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
93 copy(
94 self, pattern="*.lib", dst=os.path.join(self.package_folder, "lib"), src=self.build_folder, keep_path=False
95 )
96 copy(
97 self, pattern="*.dll", dst=os.path.join(self.package_folder, "bin"), src=self.build_folder, keep_path=False
98 )
99 copy(
100 self,
101 pattern="*.h",
102 dst=os.path.join(self.package_folder, "include"),
103 src=os.path.join(self.source_folder, "include"),
104 )
105
106 def package_info(self):
107 self.cpp_info.libs = ["package_lib"]
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py
--- a/docs/package_templates/msbuild_package/all/conanfile.py
+++ b/docs/package_templates/msbuild_package/all/conanfile.py
@@ -1,7 +1,6 @@
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
-from conan.tools.layout import vs_layout
+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
import os
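
A quick way to confirm the corrected import path on a given machine is the check below. It is a hedged sketch that assumes only that some Conan release (1.53+ or 2.x) is installed; the `_legacy` alias is a throwaway name introduced here for illustration.

```python
# Import-path check, illustrative only: vs_layout lives under
# conan.tools.microsoft, and the old conan.tools.layout location is absent
# in Conan 2.x, which is exactly the ImportError the CI reported.
from conan.tools.microsoft import vs_layout  # valid on Conan 1.53+ and 2.x

try:
    from conan.tools.layout import vs_layout as _legacy  # Conan 1.x only
except ImportError:
    print("conan.tools.layout.vs_layout is gone; import from conan.tools.microsoft")
```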
|
{"golden_diff": "diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py\n--- a/docs/package_templates/msbuild_package/all/conanfile.py\n+++ b/docs/package_templates/msbuild_package/all/conanfile.py\n@@ -1,7 +1,6 @@\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n-from conan.tools.layout import vs_layout\n+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\n import os\n", "issue": "[service] msbuild package template has import incompatible with Conan 2.0\nAt https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:\r\n\r\n```\r\nERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\", line 8, in <module>\r\n from conan.tools.layout import vs_layout\r\nImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)\r\n```\r\nTherefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\nfrom conan.tools.layout import vs_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"package\"\n description = \"short description\"\n # Use short name only, conform to SPDX License List: https://spdx.org/licenses/\n # In case not listed there, use \"LicenseRef-<license-file-name>\"\n license = \"\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/project/package\"\n # no \"conan\" and project name in topics. 
Use topics from the upstream listed on GH\n topics = (\"topic1\", \"topic2\", \"topic3\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n # no exports_sources attribute, but export_sources(self) method instead\n # this allows finer grain exportation of patches per version\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n # for plain C projects only\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n vs_layout(self)\n\n def requirements(self):\n # prefer self.requires method instead of requires attribute\n self.requires(\"dependency/0.8.1\")\n\n def validate(self):\n # in case it does not work in another configuration, it should validated here too\n if not is_msvc(self):\n raise ConanInvalidConfiguration(f\"{self.ref} can be built only by Visual Studio and msvc.\")\n\n # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)\n def build_requirements(self):\n self.tool_requires(\"tool/x.y.z\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = MSBuildToolchain(self)\n tc.generate()\n tc = MSBuildDeps(self)\n tc.generate()\n tc = VCVars(self)\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # remove bundled xxhash\n rm(self, \"whateer.*\", os.path.join(self.source_folder, \"lib\"))\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"...\", \"\")\n\n def build(self):\n self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed\n msbuild = MSBuild(self)\n # customize to Release when RelWithDebInfo\n msbuild.build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n # use Win32 instead of the default value when building x86\n msbuild.platform = \"Win32\" if self.settings.arch == \"x86\" else msbuild.platform\n # customize according the solution file and compiler version\n msbuild.build(sln=\"project_2017.sln\")\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False\n )\n copy(\n self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False\n )\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"include\"),\n )\n\n def package_info(self):\n self.cpp_info.libs = [\"package_lib\"]\n", "path": "docs/package_templates/msbuild_package/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"package\"\n description = 
\"short description\"\n # Use short name only, conform to SPDX License List: https://spdx.org/licenses/\n # In case not listed there, use \"LicenseRef-<license-file-name>\"\n license = \"\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/project/package\"\n # no \"conan\" and project name in topics. Use topics from the upstream listed on GH\n topics = (\"topic1\", \"topic2\", \"topic3\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n # no exports_sources attribute, but export_sources(self) method instead\n # this allows finer grain exportation of patches per version\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n # for plain C projects only\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n # src_folder must use the same source folder name the project\n vs_layout(self, src_folder=\"src\")\n\n def requirements(self):\n # prefer self.requires method instead of requires attribute\n self.requires(\"dependency/0.8.1\")\n\n def validate(self):\n # in case it does not work in another configuration, it should validated here too\n if not is_msvc(self):\n raise ConanInvalidConfiguration(f\"{self.ref} can be built only by Visual Studio and msvc.\")\n\n # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)\n def build_requirements(self):\n self.tool_requires(\"tool/x.y.z\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = MSBuildToolchain(self)\n tc.generate()\n tc = MSBuildDeps(self)\n tc.generate()\n tc = VCVars(self)\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # remove bundled xxhash\n rm(self, \"whateer.*\", os.path.join(self.source_folder, \"lib\"))\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"...\", \"\")\n\n def build(self):\n self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed\n msbuild = MSBuild(self)\n # customize to Release when RelWithDebInfo\n msbuild.build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n # use Win32 instead of the default value when building x86\n msbuild.platform = \"Win32\" if self.settings.arch == \"x86\" else msbuild.platform\n # customize according the solution file and compiler version\n msbuild.build(sln=\"project_2017.sln\")\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False\n )\n copy(\n self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False\n )\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"include\"),\n )\n\n def package_info(self):\n self.cpp_info.libs = [\"package_lib\"]\n", "path": "docs/package_templates/msbuild_package/all/conanfile.py"}]}
| 1,837 | 180 |
gh_patches_debug_29824 | rasdani/github-patches | git_diff | microsoft__Qcodes-531 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a
### Steps to reproduce
1. Connect Keithley 2600
2. ask it for output status or mode
3. doh
### Expected behaviour
Tell me what I asked for.
### Actual behaviour
An error occurs because parsing fails.
The issue was introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)
### System
**operating system**
W10
**qcodes branch**
master
**qcodes commit**
177f8201b3778c0071c60d4e4c658449284e1a6c
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument_drivers/tektronix/Keithley_2600.py`
Content:
```
1 from qcodes import VisaInstrument
2
3
4 class Keithley_2600(VisaInstrument):
5 """
6 channel: use channel 'a' or 'b'
7
8 This is the qcodes driver for the Keithley_2600 Source-Meter series,
9 tested with Keithley_2614B
10
11 Status: beta-version.
12 TODO:
13 - Add all parameters that are in the manual
14 - range and limit should be set according to mode
15 - add ramping and such stuff
16
17 """
18 def __init__(self, name, address, channel, **kwargs):
19 super().__init__(name, address, terminator='\n', **kwargs)
20 self._channel = channel
21
22 self.add_parameter('volt', get_cmd='measure.v()',
23 get_parser=float, set_cmd='source.levelv={:.12f}',
24 label='Voltage',
25 unit='V')
26 self.add_parameter('curr', get_cmd='measure.i()',
27 get_parser=float, set_cmd='source.leveli={:.12f}',
28 label='Current',
29 unit='A')
30 self.add_parameter('mode',
31 get_cmd='source.func',
32 set_cmd='source.func={:d}',
33 val_mapping={'current': 0, 'voltage': 1})
34 self.add_parameter('output',
35 get_cmd='source.output',
36 set_cmd='source.output={:d}',
37 val_mapping={'on': 1, 'off': 0})
38 # Source range
39 # needs get after set
40 self.add_parameter('rangev',
41 get_cmd='source.rangev',
42 get_parser=float,
43 set_cmd='source.rangev={:.4f}',
44 unit='V')
45 # Measure range
46 # needs get after set
47 self.add_parameter('rangei',
48 get_cmd='source.rangei',
49 get_parser=float,
50 set_cmd='source.rangei={:.4f}',
51 unit='A')
52 # Compliance limit
53 self.add_parameter('limitv',
54 get_cmd='source.limitv',
55 get_parser=float,
56 set_cmd='source.limitv={:.4f}',
57 unit='V')
58 # Compliance limit
59 self.add_parameter('limiti',
60 get_cmd='source.limiti',
61 get_parser=float,
62 set_cmd='source.limiti={:.4f}',
63 unit='A')
64
65 self.connect_message()
66
67 def get_idn(self):
68 IDN = self.ask_raw('*IDN?')
69 vendor, model, serial, firmware = map(str.strip, IDN.split(','))
70 model = model[6:]
71
72 IDN = {'vendor': vendor, 'model': model,
73 'serial': serial, 'firmware': firmware}
74 return IDN
75
76 def reset(self):
77 self.write('reset()')
78
79 def ask(self, cmd):
80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))
81
82 def write(self, cmd):
83 super().write('smu{:s}.{:s}'.format(self._channel, cmd))
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
@@ -19,20 +19,26 @@
super().__init__(name, address, terminator='\n', **kwargs)
self._channel = channel
- self.add_parameter('volt', get_cmd='measure.v()',
- get_parser=float, set_cmd='source.levelv={:.12f}',
+ self.add_parameter('volt',
+ get_cmd='measure.v()',
+ get_parser=float,
+ set_cmd='source.levelv={:.12f}',
label='Voltage',
unit='V')
- self.add_parameter('curr', get_cmd='measure.i()',
- get_parser=float, set_cmd='source.leveli={:.12f}',
+ self.add_parameter('curr',
+ get_cmd='measure.i()',
+ get_parser=float,
+ set_cmd='source.leveli={:.12f}',
label='Current',
unit='A')
self.add_parameter('mode',
get_cmd='source.func',
+ get_parser=float,
set_cmd='source.func={:d}',
val_mapping={'current': 0, 'voltage': 1})
self.add_parameter('output',
get_cmd='source.output',
+ get_parser=float,
set_cmd='source.output={:d}',
val_mapping={'on': 1, 'off': 0})
# Source range
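
The reason the added `get_parser=float` matters: the instrument answers `print(smua.source.func)` with a float-formatted string, and the inverse `val_mapping` lookup only succeeds once that string is parsed back to a number. The snippet below is a self-contained illustration; the exact reply string is representative rather than verified against hardware.

```python
# Standalone illustration of the parsing step (values are representative):
# the instrument returns the mode as a printed float string, so without a
# float() pass the inverse val_mapping lookup raises KeyError.
val_mapping = {'current': 0, 'voltage': 1}
inverse = {v: k for k, v in val_mapping.items()}

raw = '1.00000e+00'             # representative reply to print(smua.source.func)
print(inverse[float(raw)])      # -> 'voltage'; 1.0 hash-matches the int key 1
# inverse[raw] would raise KeyError, which is the failure described above.
```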
|
{"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -19,20 +19,26 @@\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n \n- self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.12f}',\n+ self.add_parameter('volt',\n+ get_cmd='measure.v()',\n+ get_parser=float,\n+ set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n- self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.12f}',\n+ self.add_parameter('curr',\n+ get_cmd='measure.i()',\n+ get_parser=float,\n+ set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n+ get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n+ get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n", "issue": "Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a\n\r\n### Steps to reproduce\r\n1. Connect Keithley 2600\r\n2. ask it for output status or mode\r\n3. doh\r\n\r\n### Expected behaviour\r\ntell me what i asked for\r\n\r\n### Actual behaviour\r\nerror because parsing fails\r\nissue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)\r\n\r\n\r\n### System\r\n**operating system**\r\nW10\r\n**qcodes branch**\r\nmaster\r\n**qcodes commit**\r\n177f8201b3778c0071c60d4e4c658449284e1a6c \n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, 
model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}], "after_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt',\n get_cmd='measure.v()',\n get_parser=float,\n set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr',\n get_cmd='measure.i()',\n get_parser=float,\n set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]}
| 1,309 | 382 |
gh_patches_debug_26054 | rasdani/github-patches | git_diff | encode__uvicorn-636 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--proxy-headers with supervisor get client ip failed
#### uvicorn version
`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux`
#### supervisor config
```
[fcgi-program:uvicorn]
socket=tcp://0.0.0.0:5000
directory=/var/www/webapp/
command=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app
numprocs=2
process_name=uvicorn-%(process_num)d
stdout_logfile_maxbytes=0
stdout_logfile=/var/log/uvicorn.log
```
### fastapi
version 0.52.0
```
# cat /var/www/webapp/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fastapi import FastAPI, Request
app = FastAPI(redoc_url=None)
@app.get('/')
def index(request: Request):
return 'hello {}!'.format(request.client.host)
```
### run on supervisor
supervisord version 4.1.0
```
supervisord -n
curl http://127.0.0.1:5000
```
Why is `client.host` None?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/utils.py`
Content:
```
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,5 +1,10 @@
import socket
+if hasattr(socket, "AF_UNIX"):
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)
+else:
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)
+
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
@@ -14,8 +19,9 @@
else:
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
+
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
@@ -28,7 +34,7 @@
if socket_info is not None:
info = socket_info.getsockname()
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
return None
info = transport.get_extra_info("sockname")
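
The `hasattr` guard in the patch is a portability detail worth spelling out: `socket.AF_UNIX` only exists on POSIX, so building the supported-family tuple conditionally keeps the module importable on Windows. A minimal sketch of that pattern, safe to run on any platform:

```python
# Sketch of the conditional family tuple used by the patch.
import socket

SUPPORTED = (socket.AF_INET, socket.AF_INET6)
if hasattr(socket, "AF_UNIX"):        # attribute is absent on Windows
    SUPPORTED += (socket.AF_UNIX,)

print(SUPPORTED)  # three families on POSIX, two on Windows
```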
|
{"golden_diff": "diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,5 +1,10 @@\n import socket\n \n+if hasattr(socket, \"AF_UNIX\"):\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\n+else:\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n+\n \n def get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n@@ -14,8 +19,9 @@\n else:\n family = socket_info.family\n \n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n+\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n@@ -28,7 +34,7 @@\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n", "issue": "--proxy-headers with supervisor get client ip failed\n#### uvicorn version\r\n`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux` \r\n#### supervisor config\r\n```\r\n[fcgi-program:uvicorn]\r\nsocket=tcp://0.0.0.0:5000 \r\ndirectory=/var/www/webapp/ \r\ncommand=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app \r\nnumprocs=2 \r\nprocess_name=uvicorn-%(process_num)d \r\nstdout_logfile_maxbytes=0 \r\nstdout_logfile=/var/log/uvicorn.log \r\n```\r\n\r\n### fastapi \r\nversion 0.52.0\r\n\r\n```\r\n# cat /var/www/webapp/main.py\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom fastapi import FastAPI, Request\r\n\r\napp = FastAPI(redoc_url=None)\r\n\r\n\r\[email protected]('/')\r\ndef index(request: Request):\r\n return 'hello {}!'.format(request.client.host)\r\n\r\n```\r\n\r\n### run on supervisor\r\nsupervisord version 4.1.0\r\n```\r\nsupervisord -n\r\ncurl http://127.0.0.1:5000\r\n\r\n```\r\nwhy client.host is None?\r\n\n", "before_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n 
if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}], "after_files": [{"content": "import socket\n\nif hasattr(socket, \"AF_UNIX\"):\n SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\nelse:\n SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]}
| 1,063 | 320 |
gh_patches_debug_3256 | rasdani/github-patches | git_diff | e-valuation__EvaP-1467 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenID login button is broken in latest firefox
Behavior:
When clicking the "login" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`
Expected Behavior:
Display the openID page and allow login
Steps to reproduce
Go to https://evap.hpi.de and click the "login" button. The browser is an up-to-date Firefox on Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/middleware.py`
Content:
```
1 from django.contrib.auth.views import redirect_to_login
2
3
4 class RequireLoginMiddleware:
5 def __init__(self, get_response):
6 self.get_response = get_response
7
8 def __call__(self, request):
9 return self.get_response(request)
10
11 @staticmethod
12 def process_view(request, view_func, _view_args, _view_kwargs):
13 # Returning None tells django to pass the request on
14 if request.user.is_authenticated:
15 return None
16
17 if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
18 return None
19
20 return redirect_to_login(request.get_full_path())
21
22
23 def no_login_required(func):
24 func.no_login_required = True
25 return func
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/middleware.py b/evap/middleware.py
--- a/evap/middleware.py
+++ b/evap/middleware.py
@@ -17,6 +17,9 @@
if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
return None
+ if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]:
+ return None
+
return redirect_to_login(request.get_full_path())
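
The name-based allow-list is needed because `OIDCAuthenticationRequestView` and `OIDCAuthenticationCallbackView` are class-based views shipped by a third-party package (presumably mozilla-django-oidc, given the `/oidc/` URLs in the issue), so they cannot carry the `no_login_required` attribute the decorator sets. The `is_exempt` helper below is an illustrative restatement of the check, not code from the repository:

```python
# Illustrative-only version of the exemption logic used by the middleware.
OIDC_VIEW_NAMES = {"OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"}

def is_exempt(view_func):
    # function-based views opt out via the decorator's attribute...
    if getattr(view_func, "no_login_required", False):
        return True
    # ...third-party OIDC class-based views are matched by name instead
    return view_func.__name__ in OIDC_VIEW_NAMES
```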
|
{"golden_diff": "diff --git a/evap/middleware.py b/evap/middleware.py\n--- a/evap/middleware.py\n+++ b/evap/middleware.py\n@@ -17,6 +17,9 @@\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n \n+ if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n+ return None\n+\n return redirect_to_login(request.get_full_path())\n", "issue": "OpenID login button is broken in latest firefox\nBehavior:\r\nWhen clicking the \"login\" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`\r\n\r\nExpected Behavior:\r\nDisplay the openID page and allow login\r\n\r\nSteps to reproduce\r\nGo to https://evap.hpi.de and click the \"login\" button. Browser is an up-to-date firefox on linux\n", "before_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}], "after_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}]}
| 629 | 113 |
gh_patches_debug_35959 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2088 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
US-BPA failing due to changes in data layout and timezone
```shell
File "/home/contrib/parsers/US_BPA.py", line 113, in fetch_production
raw_data = get_data(GENERATION_URL, session=session)
File "/home/contrib/parsers/US_BPA.py", line 25, in get_data
df = pd.read_table(StringIO(req.text), skiprows=5)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 685, in parser_f
return _read(filepath_or_buffer, kwds)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 463, in _read
data = parser.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 1154, in read
ret = self._engine.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 2059, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 881, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 896, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 950, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 937, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas/_libs/parsers.pyx", line 2132, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 12, saw 6
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/US_BPA.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Parser for the Bonneville Power Administration area of the USA."""
4
5 from datetime import datetime, timedelta
6 from io import StringIO
7 import logging
8 import pandas as pd
9 import pytz
10 import requests
11
12
13 GENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'
14
15 GENERATION_MAPPING = {'Wind': 'wind',
16 'Hydro': 'hydro',
17 'Fossil/Biomass': 'unknown',
18 'Nuclear': 'nuclear'}
19
20
21 def get_data(url, session=None):
22 """Returns a pandas dataframe."""
23 s=session or requests.Session()
24 req = s.get(url)
25 df = pd.read_table(StringIO(req.text), skiprows=5)
26
27 return df
28
29
30 def timestamp_converter(timestamp):
31 """Turns string representation of time into an aware datetime object."""
32
33 dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
34 western = pytz.timezone('America/Los_Angeles')
35 dt_aware = western.localize(dt_naive)
36
37 return dt_aware
38
39
40 def data_processor(df, logger):
41 """
42 Takes a dataframe and drops all generation rows that are empty or more
43 than 1 day old. Turns each row into a dictionary and removes any generation
44 types that are unknown.
45 Returns a list of tuples in the form (datetime, production).
46 """
47
48 df= df.dropna(thresh=2)
49 df.columns = df.columns.str.strip()
50
51 # 5min data for the last 24 hours.
52 df = df.tail(288)
53 df['Date/Time'] = df['Date/Time'].map(timestamp_converter)
54
55 known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}
56 column_headers = set(df.columns)
57
58 unknown_keys = column_headers - known_keys
59
60 for k in unknown_keys:
61 logger.warning('New data {} seen in US-BPA data source'.format(k),
62 extra={'key': 'US-BPA'})
63
64 keys_to_remove = unknown_keys | {'Load'}
65
66 processed_data = []
67 for index, row in df.iterrows():
68 production = row.to_dict()
69
70 dt = production.pop('Date/Time')
71 dt = dt.to_pydatetime()
72 mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
73 if k not in keys_to_remove}
74
75 processed_data.append((dt, mapped_production))
76
77 return processed_data
78
79
80 def fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
81 """
82 Requests the last known production mix (in MW) of a given zone
83 Arguments:
84 zone_key (optional) -- used in case a parser is able to fetch multiple zones
85 session (optional) -- request session passed in order to re-use an existing session
86 Return:
87 A dictionary in the form:
88 {
89 'zoneKey': 'FR',
90 'datetime': '2017-01-01T00:00:00Z',
91 'production': {
92 'biomass': 0.0,
93 'coal': 0.0,
94 'gas': 0.0,
95 'hydro': 0.0,
96 'nuclear': null,
97 'oil': 0.0,
98 'solar': 0.0,
99 'wind': 0.0,
100 'geothermal': 0.0,
101 'unknown': 0.0
102 },
103 'storage': {
104 'hydro': -10.0,
105 },
106 'source': 'mysource.com'
107 }
108 """
109
110 if target_datetime:
111 raise NotImplementedError('This parser is not yet able to parse past dates')
112
113 raw_data = get_data(GENERATION_URL, session=session)
114 processed_data = data_processor(raw_data, logger)
115
116 data = []
117 for item in processed_data:
118 datapoint = {'zoneKey': zone_key,
119 'datetime': item[0],
120 'production': item[1],
121 'storage': {},
122 'source': 'bpa.gov'}
123
124 data.append(datapoint)
125
126 return data
127
128
129 if __name__ == '__main__':
130 print('fetch_production() ->')
131 print(fetch_production())
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py
--- a/parsers/US_BPA.py
+++ b/parsers/US_BPA.py
@@ -2,11 +2,11 @@
"""Parser for the Bonneville Power Administration area of the USA."""
-from datetime import datetime, timedelta
+
from io import StringIO
+import arrow
import logging
import pandas as pd
-import pytz
import requests
@@ -20,19 +20,18 @@
def get_data(url, session=None):
"""Returns a pandas dataframe."""
- s=session or requests.Session()
+ s = session or requests.Session()
req = s.get(url)
- df = pd.read_table(StringIO(req.text), skiprows=5)
+ df = pd.read_table(StringIO(req.text), skiprows=11)
return df
def timestamp_converter(timestamp):
- """Turns string representation of time into an aware datetime object."""
+ """Turns a timestamp str into an aware datetime object."""
- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
- western = pytz.timezone('America/Los_Angeles')
- dt_aware = western.localize(dt_naive)
+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')
+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime
return dt_aware
@@ -45,7 +44,7 @@
Returns a list of tuples in the form (datetime, production).
"""
- df= df.dropna(thresh=2)
+ df = df.dropna(thresh=2)
df.columns = df.columns.str.strip()
# 5min data for the last 24 hours.
@@ -59,7 +58,7 @@
for k in unknown_keys:
logger.warning('New data {} seen in US-BPA data source'.format(k),
- extra={'key': 'US-BPA'})
+ extra={'key': 'US-BPA'})
keys_to_remove = unknown_keys | {'Load'}
@@ -69,7 +68,7 @@
dt = production.pop('Date/Time')
dt = dt.to_pydatetime()
- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()
if k not in keys_to_remove}
processed_data.append((dt, mapped_production))
|
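The patch above bumps `skiprows` from 5 to 11, which lines up with how the pandas C parser fails when banner lines are miscounted: the column count is inferred from the first unskipped line, and any later line with more fields raises exactly the `ParserError` from the traceback. A minimal, self-contained sketch; the sample text, line counts, and values below are invented for the demo, not the real BPA feed:

```python
from io import StringIO
import pandas as pd

# Hypothetical stand-in for the BPA text feed: banner lines precede the
# tab-separated header.  Counts and values here are invented for the demo.
raw = (
    "BPA Balancing Authority Load & Total Wind Generation\n"
    "comment line 2\n"
    "comment line 3\n"
    "comment line 4\n"
    "comment line 5\n"
    "Date/Time\tLoad\tWind\tHydro\tFossil/Biomass\tNuclear\n"
    "01/01/2020 00:00\t6000\t1200\t8000\t400\t1100\n"
)

# Skipping too few lines makes pandas infer a 1-column layout from a banner
# line, so the 6-field header later triggers the ParserError from the issue.
try:
    pd.read_table(StringIO(raw), skiprows=2)
except pd.errors.ParserError as exc:
    print(exc)  # Error tokenizing data. C error: Expected 1 fields ...

# Skipping exactly the banner lines lands on the real header and parses.
df = pd.read_table(StringIO(raw), skiprows=5)
print(df.columns.tolist())
```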
{"golden_diff": "diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py\n--- a/parsers/US_BPA.py\n+++ b/parsers/US_BPA.py\n@@ -2,11 +2,11 @@\n \n \"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n \n-from datetime import datetime, timedelta\n+\n from io import StringIO\n+import arrow\n import logging\n import pandas as pd\n-import pytz\n import requests\n \n \n@@ -20,19 +20,18 @@\n \n def get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n- s=session or requests.Session()\n+ s = session or requests.Session()\n req = s.get(url)\n- df = pd.read_table(StringIO(req.text), skiprows=5)\n+ df = pd.read_table(StringIO(req.text), skiprows=11)\n \n return df\n \n \n def timestamp_converter(timestamp):\n- \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n+ \"\"\"Turns a timestamp str into an aware datetime object.\"\"\"\n \n- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n- western = pytz.timezone('America/Los_Angeles')\n- dt_aware = western.localize(dt_naive)\n+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')\n+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime\n \n return dt_aware\n \n@@ -45,7 +44,7 @@\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n \n- df= df.dropna(thresh=2)\n+ df = df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n \n # 5min data for the last 24 hours.\n@@ -59,7 +58,7 @@\n \n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n- extra={'key': 'US-BPA'})\n+ extra={'key': 'US-BPA'})\n \n keys_to_remove = unknown_keys | {'Load'}\n \n@@ -69,7 +68,7 @@\n \n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()\n if k not in keys_to_remove}\n \n processed_data.append((dt, mapped_production))\n", "issue": "US-BPA failing due to changes in data layout and timezone \n```shell\r\n File \"/home/contrib/parsers/US_BPA.py\", line 113, in fetch_production\r\n raw_data = get_data(GENERATION_URL, session=session)\r\n File \"/home/contrib/parsers/US_BPA.py\", line 25, in get_data\r\n df = pd.read_table(StringIO(req.text), skiprows=5)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 950, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 937, in pandas._libs.parsers.TextReader._tokenize_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. 
C error: Expected 1 fields in line 12, saw 6\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom io import StringIO\nimport logging\nimport pandas as pd\nimport pytz\nimport requests\n\n\nGENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'\n\nGENERATION_MAPPING = {'Wind': 'wind',\n 'Hydro': 'hydro',\n 'Fossil/Biomass': 'unknown',\n 'Nuclear': 'nuclear'}\n\n\ndef get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n s=session or requests.Session()\n req = s.get(url)\n df = pd.read_table(StringIO(req.text), skiprows=5)\n\n return df\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n\n dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n western = pytz.timezone('America/Los_Angeles')\n dt_aware = western.localize(dt_naive)\n\n return dt_aware\n\n\ndef data_processor(df, logger):\n \"\"\"\n Takes a dataframe and drops all generation rows that are empty or more\n than 1 day old. Turns each row into a dictionary and removes any generation\n types that are unknown.\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n\n df= df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n\n # 5min data for the last 24 hours.\n df = df.tail(288)\n df['Date/Time'] = df['Date/Time'].map(timestamp_converter)\n\n known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}\n column_headers = set(df.columns)\n\n unknown_keys = column_headers - known_keys\n\n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n extra={'key': 'US-BPA'})\n\n keys_to_remove = unknown_keys | {'Load'}\n\n processed_data = []\n for index, row in df.iterrows():\n production = row.to_dict()\n\n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n if k not in keys_to_remove}\n\n processed_data.append((dt, mapped_production))\n\n return processed_data\n\n\ndef fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(GENERATION_URL, session=session)\n processed_data = data_processor(raw_data, logger)\n\n data = []\n for item in processed_data:\n datapoint = {'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'bpa.gov'}\n\n data.append(datapoint)\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_BPA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the Bonneville Power Administration area 
of the USA.\"\"\"\n\n\nfrom io import StringIO\nimport arrow\nimport logging\nimport pandas as pd\nimport requests\n\n\nGENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'\n\nGENERATION_MAPPING = {'Wind': 'wind',\n 'Hydro': 'hydro',\n 'Fossil/Biomass': 'unknown',\n 'Nuclear': 'nuclear'}\n\n\ndef get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n s = session or requests.Session()\n req = s.get(url)\n df = pd.read_table(StringIO(req.text), skiprows=11)\n\n return df\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Turns a timestamp str into an aware datetime object.\"\"\"\n\n arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')\n dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime\n\n return dt_aware\n\n\ndef data_processor(df, logger):\n \"\"\"\n Takes a dataframe and drops all generation rows that are empty or more\n than 1 day old. Turns each row into a dictionary and removes any generation\n types that are unknown.\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n\n df = df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n\n # 5min data for the last 24 hours.\n df = df.tail(288)\n df['Date/Time'] = df['Date/Time'].map(timestamp_converter)\n\n known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}\n column_headers = set(df.columns)\n\n unknown_keys = column_headers - known_keys\n\n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n extra={'key': 'US-BPA'})\n\n keys_to_remove = unknown_keys | {'Load'}\n\n processed_data = []\n for index, row in df.iterrows():\n production = row.to_dict()\n\n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()\n if k not in keys_to_remove}\n\n processed_data.append((dt, mapped_production))\n\n return processed_data\n\n\ndef fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(GENERATION_URL, session=session)\n processed_data = data_processor(raw_data, logger)\n\n data = []\n for item in processed_data:\n datapoint = {'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'bpa.gov'}\n\n data.append(datapoint)\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_BPA.py"}]}
| 1,908 | 568 |
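The same US_BPA patch also swaps `pytz.localize` for `arrow` when making parsed timestamps timezone-aware. As a neutral illustration of the underlying idea, attaching a zone to a naive wall-clock time, here is a stdlib-only sketch using `zoneinfo` (Python 3.9+); it is an alternative for comparison, not the code the project adopted:

```python
from datetime import datetime
from zoneinfo import ZoneInfo  # stdlib since Python 3.9

# Parse a naive wall-clock string, then attach the Pacific zone so the
# result is an aware datetime, matching what timestamp_converter produces.
naive = datetime.strptime("03/08/2020 01:30", "%m/%d/%Y %H:%M")
aware = naive.replace(tzinfo=ZoneInfo("America/Los_Angeles"))
print(aware.isoformat())  # 2020-03-08T01:30:00-08:00 (still PST at 01:30)
```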
gh_patches_debug_3683
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5883
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test bokehjs/examples and run them on travis ci
They are constantly broken due to not being continuously tested.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/airports_map.py`
Content:
```
1 from __future__ import print_function
2
3 from bokeh.layouts import row
4 from bokeh.models import (
5 Range1d, WMTSTileSource, ColumnDataSource, HoverTool,
6 )
7 from bokeh.plotting import figure, show, output_file
8 from bokeh.sampledata.airports import data as airports
9 from bokeh.tile_providers import CARTODBPOSITRON
10
11 points_source = ColumnDataSource(airports)
12 title = "US Airports: Field Elevation > 1500m"
13
14 def plot(tile_source, filename):
15 output_file(filename, title=title)
16
17 # set to roughly extent of points
18 x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)
19 y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)
20
21 # create plot and add tools
22 p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)
23 p.axis.visible = False
24 hover_tool = HoverTool(tooltips=[("Name", "@name"), ("Elevation", "@elevation (m)")])
25 p.add_tools(hover_tool)
26 p.add_tile(tile_source)
27
28 # create point glyphs
29 p.circle(x='x', y='y', size=9, fill_color="#60ACA1", line_color="#D2C4C1", line_width=1.5, source=points_source)
30 return p
31
32 # create a tile source
33 tile_options = {}
34 tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
35 mq_tile_source = WMTSTileSource(**tile_options)
36
37 carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
38 mq = plot(mq_tile_source, 'airports_map.html')
39
40 show(row([carto, mq]))
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py
--- a/examples/plotting/file/airports_map.py
+++ b/examples/plotting/file/airports_map.py
@@ -31,7 +31,7 @@
# create a tile source
tile_options = {}
-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
mq_tile_source = WMTSTileSource(**tile_options)
carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
|
{"golden_diff": "diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py\n--- a/examples/plotting/file/airports_map.py\n+++ b/examples/plotting/file/airports_map.py\n@@ -31,7 +31,7 @@\n \n # create a tile source\n tile_options = {}\n-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n mq_tile_source = WMTSTileSource(**tile_options)\n \n carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\n", "issue": "Test bokehjs/examples and run them on travis ci\nThey are constantly broken due to not being continuously tested.\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\ntile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}], "after_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\n# TODO tile_options['url'] = 
'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}]}
| 820 | 180 |
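For the bokeh entry above, the fix simply comments out the dead MapQuest URL, leaving the example to build `WMTSTileSource` with empty options. A hedged sketch of the obvious follow-up, pointing the tile source at a live `{Z}/{X}/{Y}` endpoint; the URL below is a placeholder, not a real provider:

```python
from bokeh.models import WMTSTileSource

# Placeholder endpoint: tiles.example.org is not a real tile server; swap in
# any maintained {Z}/{X}/{Y} provider.  The rest of the example's plot()
# helper can consume this object unchanged.
tile_source = WMTSTileSource(url="https://tiles.example.org/{Z}/{X}/{Y}.png")
print(tile_source.url)
```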
gh_patches_debug_31548
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1194
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`select_disk` throws an error because BlockDevice does not contain label
Running this line from a python script causes an error:
`archinstall.select_disk(archinstall.all_blockdevices())`
```
Traceback (most recent call last):
File "/root/test.py", line 3, in <module>
hdd = archinstall.select_disk(archinstall.all_blockdevices())
File "/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py", line 68, in select_disk
f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
File "/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py", line 45, in __getitem__
raise KeyError(f'{self} does not contain information: "{key}"')
KeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: "label"'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/user_interaction/disk_conf.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, Dict, TYPE_CHECKING, Optional
4
5 from .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout
6 from ..disk import BlockDevice
7 from ..exceptions import DiskError
8 from ..menu import Menu
9 from ..menu.menu import MenuSelectionType
10 from ..output import log
11
12 if TYPE_CHECKING:
13 _: Any
14
15
16 def ask_for_main_filesystem_format(advanced_options=False) -> str:
17 options = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}
18
19 advanced = {'ntfs': 'ntfs'}
20
21 if advanced_options:
22 options.update(advanced)
23
24 prompt = _('Select which filesystem your main partition should use')
25 choice = Menu(prompt, options, skip=False).run()
26 return choice.value
27
28
29 def select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:
30 result = {}
31
32 for device in block_devices:
33 layout = manage_new_and_existing_partitions(device)
34 result[device.path] = layout
35
36 return result
37
38
39 def select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:
40 wipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))
41 custome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))
42 modes = [wipe_mode, custome_mode]
43
44 warning = str(_('Are you sure you want to reset this setting?'))
45
46 choice = Menu(
47 _('Select what you wish to do with the selected block devices'),
48 modes,
49 explode_on_interrupt=True,
50 explode_warning=warning
51 ).run()
52
53 match choice.type_:
54 case MenuSelectionType.Esc: return preset
55 case MenuSelectionType.Ctrl_c: return None
56 case MenuSelectionType.Selection:
57 if choice.value == wipe_mode:
58 return get_default_partition_layout(block_devices, advanced_options)
59 else:
60 return select_individual_blockdevice_usage(block_devices)
61
62
63 def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
64 """
65 Asks the user to select a harddrive from the `dict_o_disks` selection.
66 Usually this is combined with :ref:`archinstall.list_drives`.
67
68 :param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.
69 :type dict_o_disks: dict
70
71 :return: The name/path (the dictionary key) of the selected drive
72 :rtype: str
73 """
74 drives = sorted(list(dict_o_disks.keys()))
75 if len(drives) >= 1:
76 for index, drive in enumerate(drives):
77 print(
78 f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
79 )
80
81 log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
82 fg="yellow")
83
84 drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
85 if not drive:
86 return drive
87
88 drive = dict_o_disks[drive]
89 return drive
90
91 raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py
--- a/archinstall/lib/user_interaction/disk_conf.py
+++ b/archinstall/lib/user_interaction/disk_conf.py
@@ -7,7 +7,6 @@
from ..exceptions import DiskError
from ..menu import Menu
from ..menu.menu import MenuSelectionType
-from ..output import log
if TYPE_CHECKING:
_: Any
@@ -60,7 +59,7 @@
return select_individual_blockdevice_usage(block_devices)
-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:
"""
Asks the user to select a harddrive from the `dict_o_disks` selection.
Usually this is combined with :ref:`archinstall.list_drives`.
@@ -73,19 +72,15 @@
"""
drives = sorted(list(dict_o_disks.keys()))
if len(drives) >= 1:
- for index, drive in enumerate(drives):
- print(
- f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
- )
+ title = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\n'
+ title += str(_('Select one of the disks or skip and use /mnt as default'))
- log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
- fg="yellow")
+ choice = Menu(title, drives).run()
- drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
- if not drive:
- return drive
+ if choice.type_ == MenuSelectionType.Esc:
+ return None
- drive = dict_o_disks[drive]
+ drive = dict_o_disks[choice.value]
return drive
raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
|
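The diff above replaces the crashing print loop with a menu whose result object distinguishes Esc from a real pick via `choice.type_`. A standalone sketch of that selection pattern; `MenuSelectionType` and `MenuResult` here are stand-ins, not archinstall's actual classes:

```python
from enum import Enum, auto

class MenuSelectionType(Enum):
    Selection = auto()
    Esc = auto()

class MenuResult:
    def __init__(self, type_, value=None):
        self.type_ = type_
        self.value = value

def select_disk(dict_o_disks, choice):
    if choice.type_ == MenuSelectionType.Esc:
        return None  # user skipped; the caller falls back to /mnt
    return dict_o_disks[choice.value]

disks = {"/dev/sda": "BlockDevice(/dev/sda)"}
print(select_disk(disks, MenuResult(MenuSelectionType.Esc)))                    # None
print(select_disk(disks, MenuResult(MenuSelectionType.Selection, "/dev/sda")))  # BlockDevice(/dev/sda)
```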
{"golden_diff": "diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py\n--- a/archinstall/lib/user_interaction/disk_conf.py\n+++ b/archinstall/lib/user_interaction/disk_conf.py\n@@ -7,7 +7,6 @@\n from ..exceptions import DiskError\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n-from ..output import log\n \n if TYPE_CHECKING:\n \t_: Any\n@@ -60,7 +59,7 @@\n \t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n \n \n-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n \t\"\"\"\n \tAsks the user to select a harddrive from the `dict_o_disks` selection.\n \tUsually this is combined with :ref:`archinstall.list_drives`.\n@@ -73,19 +72,15 @@\n \t\"\"\"\n \tdrives = sorted(list(dict_o_disks.keys()))\n \tif len(drives) >= 1:\n-\t\tfor index, drive in enumerate(drives):\n-\t\t\tprint(\n-\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n-\t\t\t)\n+\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n+\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n \n-\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n-\t\t\tfg=\"yellow\")\n+\t\tchoice = Menu(title, drives).run()\n \n-\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n-\t\tif not drive:\n-\t\t\treturn drive\n+\t\tif choice.type_ == MenuSelectionType.Esc:\n+\t\t\treturn None\n \n-\t\tdrive = dict_o_disks[drive]\n+\t\tdrive = dict_o_disks[choice.value]\n \t\treturn drive\n \n \traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "issue": "`select_disk` throw an error because BlockDevice does not contain label\nRunning this line from a python script causes an error: \r\n\r\n`archinstall.select_disk(archinstall.all_blockdevices())`\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/test.py\", line 3, in <module>\r\n hdd = archinstall.select_disk(archinstall.all_blockdevices())\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py\", line 68, in select_disk\r\n f\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py\", line 45, in __getitem__\r\n raise KeyError(f'{self} does not contain information: \"{key}\"')\r\nKeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: \"label\"'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main 
partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\tfor index, drive in enumerate(drives):\n\t\t\tprint(\n\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n\t\t\t)\n\n\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n\t\t\tfg=\"yellow\")\n\n\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n\t\tif not drive:\n\t\t\treturn drive\n\n\t\tdrive = dict_o_disks[drive]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn 
result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n\n\t\tchoice = Menu(title, drives).run()\n\n\t\tif choice.type_ == MenuSelectionType.Esc:\n\t\t\treturn None\n\n\t\tdrive = dict_o_disks[choice.value]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}]}
| 1,456 | 498 |
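The root cause in the archinstall entry is that `BlockDevice.__getitem__` raises `KeyError` for metadata some devices (such as the optical drive `/dev/sr0`) never carry. Independent of the menu rewrite above, a defensive lookup avoids the crash; the classes below are mocks written for this sketch, not archinstall's:

```python
# Mock classes: mimic a device whose __getitem__ raises KeyError for
# metadata it does not carry, and read it defensively.
class FakeBlockDevice:
    def __init__(self, info):
        self._info = info

    def __getitem__(self, key):
        if key not in self._info:
            raise KeyError(f'{self} does not contain information: "{key}"')
        return self._info[key]

def safe_info(device, key, default="unknown"):
    try:
        return device[key]
    except KeyError:
        return default

sr0 = FakeBlockDevice({"size": "0.8GB"})  # optical drives often lack "label"
print(safe_info(sr0, "size"), safe_info(sr0, "label"))  # 0.8GB unknown
```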
gh_patches_debug_19023
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add uint256 decimal support for convert
### What's your issue about?
Convert should allow conversion of uint256 -> decimal.
### How can it be fixed?
convert.py:57
#### Cute Animal Picture
^.^
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/types/convert.py`
Content:
```
1 from vyper.functions.signature import (
2 signature
3 )
4 from vyper.parser.parser_utils import (
5 LLLnode,
6 getpos,
7 byte_array_to_num
8 )
9 from vyper.exceptions import (
10 InvalidLiteralException,
11 TypeMismatchException,
12 )
13 from vyper.types import (
14 BaseType,
15 )
16 from vyper.types import (
17 get_type,
18 )
19 from vyper.utils import (
20 DECIMAL_DIVISOR,
21 MemoryPositions,
22 SizeLimits
23 )
24
25
26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
27 def to_int128(expr, args, kwargs, context):
28 in_node = args[0]
29 typ, len = get_type(in_node)
30 if typ in ('int128', 'uint256', 'bytes32'):
31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
33 return LLLnode.from_list(
34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node,
35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)
36 )
37 else:
38 return byte_array_to_num(in_node, expr, 'int128')
39
40
41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal')
42 def to_uint256(expr, args, kwargs, context):
43 in_node = args[0]
44 typ, len = get_type(in_node)
45 if isinstance(in_node, int):
46
47 if not SizeLimits.in_bounds('uint256', in_node):
48 raise InvalidLiteralException("Number out of range: {}".format(in_node))
49 _unit = in_node.typ.unit if typ == 'int128' else None
50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))
51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):
52 _unit = in_node.typ.unit if typ == 'int128' else None
53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))
54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'):
55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))
56 else:
57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
58
59
60 @signature('int128', 'str_literal')
61 def to_decimal(expr, args, kwargs, context):
62 input = args[0]
63 return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
64 pos=getpos(expr))
65
66
67 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
68 def to_bytes32(expr, args, kwargs, context):
69 input = args[0]
70 typ, len = get_type(input)
71 if typ == 'bytes':
72 if len != 32:
73 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len))
74 if input.location == "memory":
75 return LLLnode.from_list(
76 ['mload', ['add', input, 32]], typ=BaseType('bytes32')
77 )
78 elif input.location == "storage":
79 return LLLnode.from_list(
80 ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')
81 )
82 else:
83 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))
84
85
86 def convert(expr, context):
87 output_type = expr.args[1].s
88 if output_type in conversion_table:
89 return conversion_table[output_type](expr, context)
90 else:
91 raise Exception("Conversion to {} is invalid.".format(output_type))
92
93
94 conversion_table = {
95 'int128': to_int128,
96 'uint256': to_uint256,
97 'decimal': to_decimal,
98 'bytes32': to_bytes32,
99 }
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vyper/types/convert.py b/vyper/types/convert.py
--- a/vyper/types/convert.py
+++ b/vyper/types/convert.py
@@ -57,11 +57,20 @@
raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
-@signature('int128', 'str_literal')
+@signature(('int128', 'uint256'), 'str_literal')
def to_decimal(expr, args, kwargs, context):
input = args[0]
- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
- pos=getpos(expr))
+ if input.typ.typ == 'uint256':
+ return LLLnode.from_list(
+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)
+ )
+ else:
+ return LLLnode.from_list(
+ ['mul', input, DECIMAL_DIVISOR],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),
+ pos=getpos(expr)
+ )
@signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
|
{"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -57,11 +57,20 @@\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n \n \n-@signature('int128', 'str_literal')\n+@signature(('int128', 'uint256'), 'str_literal')\n def to_decimal(expr, args, kwargs, context):\n input = args[0]\n- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n- pos=getpos(expr))\n+ if input.typ.typ == 'uint256':\n+ return LLLnode.from_list(\n+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n+ )\n+ else:\n+ return LLLnode.from_list(\n+ ['mul', input, DECIMAL_DIVISOR],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n+ pos=getpos(expr)\n+ )\n \n \n @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')\n", "issue": "Add uint256 decimal support for convert\n### What's your issue about?\r\n\r\nConvert should allow conversion of uint256 -> decimal.\r\n\r\n### How can it be fixed?\r\n\r\nconvert.py:57\r\n\r\n#### Cute Animal Picture\r\n^.^\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature('int128', 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr))\n\n\n@signature(('int128', 
'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}], "after_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature(('int128', 'uint256'), 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n if input.typ.typ == 'uint256':\n return LLLnode.from_list(\n ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n )\n else:\n return LLLnode.from_list(\n ['mul', input, DECIMAL_DIVISOR],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr)\n 
)\n\n\n@signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]}
| 1,481 | 321 |
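The vyper patch guards the new `uint256 -> decimal` path with `uclample` against `MemoryPositions.MAXDECIMAL`. A plain-Python sketch of the same range check, assuming the conventional fixed-point layout (a `10**10` divisor and a signed 128-bit raw slot; the exact constant is an assumption standing in for `MAXDECIMAL`):

```python
# Fixed-point decimal layout assumed from vyper's utils: raw = value * 10**10,
# and the raw value must fit a signed 128-bit slot.  MAX_DECIMAL_RAW below is
# an assumption standing in for MemoryPositions.MAXDECIMAL.
DECIMAL_DIVISOR = 10 ** 10
MAX_DECIMAL_RAW = 2 ** 127 - 1

def uint256_to_decimal_raw(value: int) -> int:
    if value < 0 or value >= 2 ** 256:
        raise ValueError("not a uint256")
    raw = value * DECIMAL_DIVISOR
    if raw > MAX_DECIMAL_RAW:
        # mirrors what the 'uclample' clamp rejects at runtime
        raise OverflowError("uint256 too large to represent as decimal")
    return raw

print(uint256_to_decimal_raw(5))        # 50000000000
print(uint256_to_decimal_raw(2 ** 93))  # near the top of the range, still ok
# uint256_to_decimal_raw(2 ** 94)       # would raise OverflowError
```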
gh_patches_debug_3044
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-1374
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatibilities with xarray > 2022.03
### Expected behaviour
ODC should work with current version of `xarray`. In `setup.py` there's an exclusion of `2022.6.0`, but I don't think that's sufficient. It'd be worth digging up the commit/PR that made that change.
### Actual behaviour
Tests are failing.
```
FAILED tests/api/test_grid_workflow.py::test_gridworkflow_with_time_depth - AssertionError
FAILED tests/api/test_virtual.py::test_aggregate - ValueError: time already exists as coordinate or variable name.
```
### Steps to reproduce the behaviour
`pytest tests/`
### Environment information
* Which ``datacube --version`` are you using?
`develop` branch at `af59377327c363b9c52b55000b4024a0b3fbaa8b`
* What datacube deployment/environment are you running against?
- Mambaforge
- conda-forge
- Python 3.10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'hypothesis',
7 'pycodestyle',
8 'pylint',
9 'pytest',
10 'pytest-cov',
11 'pytest-timeout',
12 'pytest-httpserver',
13 'moto',
14 ]
15 doc_require = [
16 'Sphinx',
17 'sphinx_rtd_theme',
18 'sphinx_autodoc_typehints', # Propagate mypy info into docs
19 'sphinx-click',
20 'recommonmark',
21 'setuptools', # version related dependencies
22 'setuptools_scm[toml]',
23 ]
24
25 extras_require = {
26 'performance': ['ciso8601', 'bottleneck'],
27 'distributed': ['distributed', 'dask[distributed]'],
28 'doc': doc_require,
29 's3': ['boto3', 'botocore'],
30 'test': tests_require,
31 'cf': ['compliance-checker>=4.0.0'],
32 }
33
34 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [
35 'test',
36 'doc',
37 'performance',
38 's3',
39 'distributed',
40 ]], [])))
41
42 # An 'all' option, following ipython naming conventions.
43 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
44
45 extra_plugins = dict(read=[], write=[], index=[])
46
47 setup(
48 name='datacube',
49 python_requires='>=3.8.0',
50
51 url='https://github.com/opendatacube/datacube-core',
52 author='Open Data Cube',
53 maintainer='Open Data Cube',
54 maintainer_email='',
55 description='An analysis environment for satellite and other earth observation data',
56 long_description=open('README.rst').read(),
57 long_description_content_type='text/x-rst',
58 license='Apache License 2.0',
59 classifiers=[
60 "Development Status :: 4 - Beta",
61 "Intended Audience :: Developers",
62 "Intended Audience :: Science/Research",
63 "License :: OSI Approved :: Apache Software License",
64 "Natural Language :: English",
65 "Operating System :: MacOS :: MacOS X",
66 "Operating System :: POSIX",
67 "Operating System :: POSIX :: BSD",
68 "Operating System :: POSIX :: Linux",
69 "Operating System :: Microsoft :: Windows",
70 "Programming Language :: Python",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Topic :: Scientific/Engineering :: GIS",
76 "Topic :: Scientific/Engineering :: Information Analysis",
77 ],
78
79 packages=find_packages(
80 exclude=('tests', 'tests.*',
81 'integration_tests', 'integration_tests.*')
82 ),
83 package_data={
84 '': ['*.yaml', '*/*.yaml'],
85 'datacube': ['py.typed'],
86 },
87 scripts=[],
88 install_requires=[
89 'affine',
90 'pyproj>=2.5',
91 'shapely>=1.6.4',
92 'cachetools',
93 'click>=5.0',
94 'cloudpickle>=0.4',
95 'dask[array]',
96 'distributed',
97 'jsonschema',
98 'netcdf4',
99 'numpy',
100 'psycopg2',
101 'lark',
102 'pandas',
103 'python-dateutil',
104 'pyyaml',
105 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1
106 'sqlalchemy',
107 'GeoAlchemy2',
108 'toolz',
109 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost
110 ],
111 extras_require=extras_require,
112 tests_require=tests_require,
113
114 entry_points={
115 'console_scripts': [
116 'datacube = datacube.scripts.cli_app:cli',
117 'datacube-search = datacube.scripts.search_tool:cli',
118 'datacube-worker = datacube.execution.worker:main',
119 ],
120 'datacube.plugins.io.read': [
121 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
122 *extra_plugins['read'],
123 ],
124 'datacube.plugins.io.write': [
125 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
126 *extra_plugins['write'],
127 ],
128 'datacube.plugins.index': [
129 'default = datacube.index.postgres.index:index_driver_init',
130 'null = datacube.index.null.index:index_driver_init',
131 'memory = datacube.index.memory.index:index_driver_init',
132 'postgis = datacube.index.postgis.index:index_driver_init',
133 *extra_plugins['index'],
134 ],
135 },
136 )
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -106,7 +106,7 @@
'sqlalchemy',
'GeoAlchemy2',
'toolz',
- 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost
+ 'xarray>=0.9,<2022.6', # >0.9 fixes most problems with `crs` attributes being lost
],
extras_require=extras_require,
tests_require=tests_require,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -106,7 +106,7 @@\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n- 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n+ 'xarray>=0.9,<2022.6', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n", "issue": "Incompatibilities with xarray > 2022.03\n### Expected behaviour\r\nODC should work with current version of `xarray`. In `setup.py` there's an exclusion of `2022.6.0`, but I don't think that's sufficient. It'd be worth digging up the commit/PR that made that change.\r\n\r\n### Actual behaviour\r\n\r\nTests are failing.\r\n\r\n```\r\nFAILED tests/api/test_grid_workflow.py::test_gridworkflow_with_time_depth - AssertionError\r\nFAILED tests/api/test_virtual.py::test_aggregate - ValueError: time already exists as coordinate or variable name.\r\n```\r\n\r\n### Steps to reproduce the behaviour\r\n\r\n`pytest tests/`\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n\r\n `develop` branch at `af59377327c363b9c52b55000b4024a0b3fbaa8b`\r\n\r\n* What datacube deployment/enviornment are you running against?\r\n\r\n - Mambaforge\r\n - conda-forge \r\n - Python 3.10\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n 
packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming 
Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,<2022.6', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
| 1,842 | 139 |
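The row above pins datacube's xarray dependency: its golden diff swaps the single-version exclusion `!=2022.6.0` for the upper bound `<2022.6`. As a minimal sketch of why that matters — using the third-party `packaging` library, which is an assumption here and not part of the patch — the two specifiers disagree exactly on releases newer than 2022.6:

```python
# Hypothetical check of the old vs. new specifier from the diff above;
# `packaging` is an assumed helper dependency, not something the patch adds.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_spec = SpecifierSet(">=0.9,!=2022.6.0")  # excludes only one release
new_spec = SpecifierSet(">=0.9,<2022.6")     # excludes 2022.6 and everything later

for candidate in ("0.9", "2022.3.0", "2022.6.0", "2022.9.0"):
    v = Version(candidate)
    print(f"{candidate}: old={v in old_spec} new={v in new_spec}")
# 2022.9.0 satisfies the old specifier but not the new one, matching the
# issue report that excluding 2022.6.0 alone was not sufficient.
```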
gh_patches_debug_25447 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-270 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add aliases for cookiecutters
Remembering the locations of cookiecutters can be a bit of a pain - even if it's just including the local path.
It would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:
```
aliases:
python: https://github.com/audreyr/cookiecutter-pypackage
django: https://github.com/marcofucci/cookiecutter-simple-django
docs: ~/samples/templates/documentation
```
The code would simply need to translate an alias into a path before doing any of the existing processing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/main.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import argparse
16 import logging
17 import os
18 import sys
19
20 from . import __version__
21 from .config import get_user_config
22 from .prompt import prompt_for_config
23 from .generate import generate_context, generate_files
24 from .vcs import clone
25
26 logger = logging.getLogger(__name__)
27
28
29 def cookiecutter(input_dir, checkout=None, no_input=False):
30 """
31 API equivalent to using Cookiecutter at the command line.
32
33 :param input_dir: A directory containing a project template dir,
34 or a URL to git repo.
35 :param checkout: The branch, tag or commit ID to checkout after clone
36 """
37
38 # Get user config from ~/.cookiecutterrc or equivalent
39 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
40 config_dict = get_user_config()
41
42 # TODO: find a better way to tell if it's a repo URL
43 if "git@" in input_dir or "https://" in input_dir:
44 repo_dir = clone(
45 repo_url=input_dir,
46 checkout=checkout,
47 clone_to_dir=config_dict['cookiecutters_dir']
48 )
49 else:
50 # If it's a local repo, no need to clone or copy to your cookiecutters_dir
51 repo_dir = input_dir
52
53 context_file = os.path.join(repo_dir, 'cookiecutter.json')
54 logging.debug('context_file is {0}'.format(context_file))
55
56 context = generate_context(
57 context_file=context_file,
58 default_context=config_dict['default_context']
59 )
60
61 # prompt the user to manually configure at the command line.
62 # except when 'no-input' flag is set
63 if not no_input:
64 cookiecutter_dict = prompt_for_config(context)
65 context['cookiecutter'] = cookiecutter_dict
66
67 # Create project from local context and project template.
68 generate_files(
69 repo_dir=repo_dir,
70 context=context
71 )
72
73
74 def _get_parser():
75 parser = argparse.ArgumentParser(
76 description='Create a project from a Cookiecutter project template.'
77 )
78 parser.add_argument(
79 '--no-input',
80 action="store_true",
81 help='Do not prompt for parameters and only use cookiecutter.json '
82 'file content')
83 parser.add_argument(
84 'input_dir',
85 help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'
86 )
87 parser.add_argument(
88 '-c', '--checkout',
89 help='branch, tag or commit to checkout after git clone'
90 )
91 cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
92 parser.add_argument(
93 '-V', '--version',
94 help="Show version information and exit.",
95 action='version',
96 version='Cookiecutter %s from %s (Python %s)' % (
97 __version__,
98 cookiecutter_pkg_dir,
99 sys.version[:3]
100 )
101 )
102 parser.add_argument(
103 '-v', '--verbose',
104 help='Print debug information',
105 action='store_true', default=False
106 )
107
108 return parser
109
110 def parse_cookiecutter_args(args):
111 """ Parse the command-line arguments to Cookiecutter. """
112 parser = _get_parser()
113 return parser.parse_args(args)
114
115
116 def main():
117 """ Entry point for the package, as defined in setup.py. """
118
119 args = parse_cookiecutter_args(sys.argv[1:])
120
121 if args.verbose:
122 logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)
123 else:
124 # Log info and above to console
125 logging.basicConfig(
126 format='%(levelname)s: %(message)s',
127 level=logging.INFO
128 )
129
130 cookiecutter(args.input_dir, args.checkout, args.no_input)
131
132
133 if __name__ == '__main__':
134 main()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -25,6 +25,34 @@
logger = logging.getLogger(__name__)
+builtin_abbreviations = {
+ 'gh': 'https://github.com/{0}.git',
+ 'bb': 'https://bitbucket.org/{0}',
+}
+
+def expand_abbreviations(input_dir, config_dict):
+ """
+ Expand abbreviations in a template name.
+
+ :param input_dir: The project template name.
+ :param config_dict: The user config, which will contain abbreviation
+ definitions.
+ """
+
+ abbreviations = builtin_abbreviations.copy()
+ abbreviations.update(config_dict.get('abbreviations', {}))
+
+ if input_dir in abbreviations:
+ return abbreviations[input_dir]
+
+ # Split on colon. If there is no colon, rest will be empty
+ # and prefix will be the whole input_dir
+ prefix, sep, rest = input_dir.partition(':')
+ if prefix in abbreviations:
+ return abbreviations[prefix].format(rest)
+
+ return input_dir
+
def cookiecutter(input_dir, checkout=None, no_input=False):
"""
@@ -39,6 +67,8 @@
# If no config file, sensible defaults from config.DEFAULT_CONFIG are used
config_dict = get_user_config()
+ input_dir = expand_abbreviations(input_dir, config_dict)
+
# TODO: find a better way to tell if it's a repo URL
if "git@" in input_dir or "https://" in input_dir:
repo_dir = clone(
|
{"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -25,6 +25,34 @@\n \n logger = logging.getLogger(__name__)\n \n+builtin_abbreviations = {\n+ 'gh': 'https://github.com/{0}.git',\n+ 'bb': 'https://bitbucket.org/{0}',\n+}\n+\n+def expand_abbreviations(input_dir, config_dict):\n+ \"\"\"\n+ Expand abbreviations in a template name.\n+\n+ :param input_dir: The project template name.\n+ :param config_dict: The user config, which will contain abbreviation\n+ definitions.\n+ \"\"\"\n+\n+ abbreviations = builtin_abbreviations.copy()\n+ abbreviations.update(config_dict.get('abbreviations', {}))\n+\n+ if input_dir in abbreviations:\n+ return abbreviations[input_dir]\n+\n+ # Split on colon. If there is no colon, rest will be empty\n+ # and prefix will be the whole input_dir\n+ prefix, sep, rest = input_dir.partition(':')\n+ if prefix in abbreviations:\n+ return abbreviations[prefix].format(rest)\n+\n+ return input_dir\n+\n \n def cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n@@ -39,6 +67,8 @@\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n \n+ input_dir = expand_abbreviations(input_dir, config_dict)\n+\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n", "issue": "Add aliases for cookiecutters\nRemembering the locations of cookiecutters can be a bit of a pain - even if it's just including the local path.\n\nIt would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:\n\n```\naliases:\n python: https://github.com/audreyr/cookiecutter-pypackage\n django: https://github.com/marcofucci/cookiecutter-simple-django\n docs: ~/samples/templates/documentation\n```\n\nThe code would simply need to translate an alias into a path before doing any of the existing processing.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom . 
import __version__\nfrom .config import get_user_config\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param input_dir: A directory containing a project template dir,\n or a URL to git repo.\n :param checkout: The branch, tag or commit ID to checkout after clone\n \"\"\"\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n repo_url=input_dir,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir']\n )\n else:\n # If it's a local repo, no need to clone or copy to your cookiecutters_dir\n repo_dir = input_dir\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context']\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n if not no_input:\n cookiecutter_dict = prompt_for_config(context)\n context['cookiecutter'] = cookiecutter_dict\n\n # Create project from local context and project template.\n generate_files(\n repo_dir=repo_dir,\n context=context\n )\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(\n description='Create a project from a Cookiecutter project template.'\n )\n parser.add_argument(\n '--no-input',\n action=\"store_true\",\n help='Do not prompt for parameters and only use cookiecutter.json '\n 'file content')\n parser.add_argument(\n 'input_dir',\n help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'\n )\n parser.add_argument(\n '-c', '--checkout',\n help='branch, tag or commit to checkout after git clone'\n )\n cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.add_argument(\n '-V', '--version',\n help=\"Show version information and exit.\",\n action='version',\n version='Cookiecutter %s from %s (Python %s)' % (\n __version__,\n cookiecutter_pkg_dir,\n sys.version[:3]\n )\n )\n parser.add_argument(\n '-v', '--verbose',\n help='Print debug information',\n action='store_true', default=False\n )\n\n return parser\n\ndef parse_cookiecutter_args(args):\n \"\"\" Parse the command-line arguments to Cookiecutter. \"\"\"\n parser = _get_parser()\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\" Entry point for the package, as defined in setup.py. 
\"\"\"\n\n args = parse_cookiecutter_args(sys.argv[1:])\n\n if args.verbose:\n logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n\n\nif __name__ == '__main__':\n main()\n", "path": "cookiecutter/main.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom . import __version__\nfrom .config import get_user_config\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\ndef expand_abbreviations(input_dir, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param input_dir: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if input_dir in abbreviations:\n return abbreviations[input_dir]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole input_dir\n prefix, sep, rest = input_dir.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return input_dir\n\n\ndef cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param input_dir: A directory containing a project template dir,\n or a URL to git repo.\n :param checkout: The branch, tag or commit ID to checkout after clone\n \"\"\"\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n\n input_dir = expand_abbreviations(input_dir, config_dict)\n\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n repo_url=input_dir,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir']\n )\n else:\n # If it's a local repo, no need to clone or copy to your cookiecutters_dir\n repo_dir = input_dir\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context']\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n if not no_input:\n cookiecutter_dict = prompt_for_config(context)\n context['cookiecutter'] = cookiecutter_dict\n\n # Create project from local context and project template.\n generate_files(\n repo_dir=repo_dir,\n context=context\n )\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(\n description='Create a project from a Cookiecutter project template.'\n )\n parser.add_argument(\n '--no-input',\n action=\"store_true\",\n help='Do not prompt for parameters and only use 
cookiecutter.json '\n 'file content')\n parser.add_argument(\n 'input_dir',\n help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'\n )\n parser.add_argument(\n '-c', '--checkout',\n help='branch, tag or commit to checkout after git clone'\n )\n cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.add_argument(\n '-V', '--version',\n help=\"Show version information and exit.\",\n action='version',\n version='Cookiecutter %s from %s (Python %s)' % (\n __version__,\n cookiecutter_pkg_dir,\n sys.version[:3]\n )\n )\n parser.add_argument(\n '-v', '--verbose',\n help='Print debug information',\n action='store_true', default=False\n )\n\n return parser\n\ndef parse_cookiecutter_args(args):\n \"\"\" Parse the command-line arguments to Cookiecutter. \"\"\"\n parser = _get_parser()\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\" Entry point for the package, as defined in setup.py. \"\"\"\n\n args = parse_cookiecutter_args(sys.argv[1:])\n\n if args.verbose:\n logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n\n\nif __name__ == '__main__':\n main()\n", "path": "cookiecutter/main.py"}]}
| 1,577 | 382 |
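To make the contract of the patch above concrete, here is a self-contained rerun of its `expand_abbreviations` helper outside cookiecutter. Note the merged patch calls these `abbreviations`, not `aliases` as the issue proposed; the `config` dict below is a hypothetical stand-in for a parsed `~/.cookiecutterrc`:

```python
# Standalone copy of the helper from the golden diff above, plus a demo.
builtin_abbreviations = {
    "gh": "https://github.com/{0}.git",
    "bb": "https://bitbucket.org/{0}",
}

def expand_abbreviations(input_dir, config_dict):
    # User-defined abbreviations override the built-in ones.
    abbreviations = builtin_abbreviations.copy()
    abbreviations.update(config_dict.get("abbreviations", {}))

    if input_dir in abbreviations:
        return abbreviations[input_dir]

    # Split on colon; with no colon, `rest` is empty and `prefix` is the
    # whole input, so plain paths and URLs fall through unchanged.
    prefix, sep, rest = input_dir.partition(":")
    if prefix in abbreviations:
        return abbreviations[prefix].format(rest)

    return input_dir

config = {"abbreviations": {"python": "https://github.com/audreyr/cookiecutter-pypackage"}}
print(expand_abbreviations("python", config))
# -> https://github.com/audreyr/cookiecutter-pypackage
print(expand_abbreviations("gh:audreyr/cookiecutter-pypackage", config))
# -> https://github.com/audreyr/cookiecutter-pypackage.git
```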
gh_patches_debug_38603 | rasdani/github-patches | git_diff | docker__docker-py-347 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SSLAdapter init_poolmanager error because request.packages.urllib3.__version__ invalid
In ssladapter.py,
``` python
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
...
if urllib3 and urllib_ver == 'dev' and \
StrictVersion(urllib_ver) > StrictVersion('1.5'):
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
```
`requests.packages.urllib3` will be imported, and the `requests.packages.urllib3.__version__` is 'dev' at present, and the `StrictVersion(urllib_ver)` will raise a ValueError: "invalid version number 'dev'"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/tls.py`
Content:
```
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 verify = None
10 ssl_version = None
11
12 def __init__(self, client_cert=None, ca_cert=None, verify=None,
13 ssl_version=None):
14 # Argument compatibility/mapping with
15 # http://docs.docker.com/examples/https/
16 # This diverges from the Docker CLI in that users can specify 'tls'
17 # here, but also disable any public/default CA pool verification by
18 # leaving tls_verify=False
19
20 # urllib3 sets a default ssl_version if ssl_version is None
21 # http://tinyurl.com/kxga8hb
22 self.ssl_version = ssl_version
23
24 # "tls" and "tls_verify" must have both or neither cert/key files
25 # In either case, Alert the user when both are expected, but any are
26 # missing.
27
28 if client_cert:
29 try:
30 tls_cert, tls_key = client_cert
31 except ValueError:
32 raise errors.TLSParameterError(
33 'client_config must be a tuple of'
34 ' (client certificate, key file)'
35 )
36
37 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
38 not os.path.isfile(tls_key)):
39 raise errors.TLSParameterError(
40 'Path to a certificate and key files must be provided'
41 ' through the client_config param'
42 )
43 self.cert = (tls_cert, tls_key)
44
45 # Either set verify to True (public/default CA checks) or to the
46 # path of a CA Cert file.
47 if verify is not None:
48 if not ca_cert:
49 self.verify = verify
50 elif os.path.isfile(ca_cert):
51 if not verify:
52 raise errors.TLSParameterError(
53 'verify can not be False when a CA cert is'
54 ' provided.'
55 )
56 self.verify = ca_cert
57 else:
58 raise errors.TLSParameterError(
59 'Invalid CA certificate provided for `tls_ca_cert`.'
60 )
61
62 def configure_client(self, client):
63 client.ssl_version = self.ssl_version
64 if self.verify is not None:
65 client.verify = self.verify
66 if self.cert:
67 client.cert = self.cert
68 client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
69
```
Path: `docker/ssladapter/ssladapter.py`
Content:
```
1 """ Resolves OpenSSL issues in some servers:
2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
3 https://github.com/kennethreitz/requests/pull/799
4 """
5 from distutils.version import StrictVersion
6 from requests.adapters import HTTPAdapter
7 try:
8 import requests.packages.urllib3 as urllib3
9 except ImportError:
10 import urllib3
11
12
13 PoolManager = urllib3.poolmanager.PoolManager
14
15
16 class SSLAdapter(HTTPAdapter):
17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
18 def __init__(self, ssl_version=None, **kwargs):
19 self.ssl_version = ssl_version
20 super(SSLAdapter, self).__init__(**kwargs)
21
22 def init_poolmanager(self, connections, maxsize, block=False):
23 urllib_ver = urllib3.__version__.split('-')[0]
24 kwargs = {
25 'num_pools': connections,
26 'maxsize': maxsize,
27 'block': block
28 }
29 if urllib3 and urllib_ver == 'dev' and \
30 StrictVersion(urllib_ver) > StrictVersion('1.5'):
31 kwargs['ssl_version'] = self.ssl_version
32
33 self.poolmanager = PoolManager(**kwargs)
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -15,19 +15,27 @@
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
- def __init__(self, ssl_version=None, **kwargs):
+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
- urllib_ver = urllib3.__version__.split('-')[0]
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
- 'block': block
+ 'block': block,
+ 'assert_hostname': self.assert_hostname,
}
- if urllib3 and urllib_ver == 'dev' and \
- StrictVersion(urllib_ver) > StrictVersion('1.5'):
+ if self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
+
+ def can_override_ssl_version(self):
+ urllib_ver = urllib3.__version__.split('-')[0]
+ if urllib_ver is None:
+ return False
+ if urllib_ver == 'dev':
+ return True
+ return StrictVersion(urllib_ver) > StrictVersion('1.5')
diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -10,7 +10,7 @@
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
- ssl_version=None):
+ ssl_version=None, assert_hostname=None):
# Argument compatibility/mapping with
# http://docs.docker.com/examples/https/
# This diverges from the Docker CLI in that users can specify 'tls'
@@ -20,6 +20,7 @@
# urllib3 sets a default ssl_version if ssl_version is None
# http://tinyurl.com/kxga8hb
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
@@ -65,4 +66,7 @@
client.verify = self.verify
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
+ client.mount('https://', ssladapter.SSLAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ ))
|
{"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -15,19 +15,27 @@\n \n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n- def __init__(self, ssl_version=None, **kwargs):\n+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n \n def init_poolmanager(self, connections, maxsize, block=False):\n- urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n- 'block': block\n+ 'block': block,\n+ 'assert_hostname': self.assert_hostname,\n }\n- if urllib3 and urllib_ver == 'dev' and \\\n- StrictVersion(urllib_ver) > StrictVersion('1.5'):\n+ if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n \n self.poolmanager = PoolManager(**kwargs)\n+\n+ def can_override_ssl_version(self):\n+ urllib_ver = urllib3.__version__.split('-')[0]\n+ if urllib_ver is None:\n+ return False\n+ if urllib_ver == 'dev':\n+ return True\n+ return StrictVersion(urllib_ver) > StrictVersion('1.5')\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -10,7 +10,7 @@\n ssl_version = None\n \n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n- ssl_version=None):\n+ ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n@@ -20,6 +20,7 @@\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n \n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n@@ -65,4 +66,7 @@\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n+ client.mount('https://', ssladapter.SSLAdapter(\n+ ssl_version=self.ssl_version,\n+ assert_hostname=self.assert_hostname,\n+ ))\n", "issue": "SSLAdapter init_poolmanager error because request.packages.urllib3.__version__ invalid\nIn ssladapter.py, \n\n``` python\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n...\n\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n```\n\n`requests.packages.urllib3` will be imported, and the `requests.packages.urllib3.__version__` is 'dev' at present, and the `StrictVersion(urllib_ver)` will raise a ValueError: \"invalid version number 'dev'\"\n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, **kwargs):\n self.ssl_version = ssl_version\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block\n }\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n", "path": "docker/ssladapter/ssladapter.py"}], "after_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}]}
| 1,394 | 639 |
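The fix above works by short-circuiting before `StrictVersion` ever parses the literal string `'dev'`, which is what raised the ValueError in the issue. A standalone sketch of that guard, fed hypothetical version strings rather than the real `urllib3.__version__` (`distutils` is what the patch itself uses; it is deprecated on modern Python):

```python
from distutils.version import StrictVersion

def can_override_ssl_version(urllib3_version):
    # Mirrors the guard from the golden diff: strip any "-suffix" first,
    # then bail out on "dev" before StrictVersion can choke on it.
    urllib_ver = urllib3_version.split("-")[0]
    if urllib_ver is None:
        return False
    if urllib_ver == "dev":
        return True
    return StrictVersion(urllib_ver) > StrictVersion("1.5")

for v in ("dev", "1.5", "1.10", "1.10-dev"):
    print(v, can_override_ssl_version(v))
# "dev" now returns True without ever reaching StrictVersion, so the
# ValueError "invalid version number 'dev'" can no longer be raised here.
```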
gh_patches_debug_19397 | rasdani/github-patches | git_diff | elastic__apm-agent-python-766 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'UnixDomainSocketConnection' object has no attribute 'port'
AttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'
elastic-apm==5.5.1
```
File "django_redis/cache.py", line 32, in _decorator
return method(self, *args, **kwargs)
File "django_redis/cache.py", line 80, in get
return self.client.get(key, default=default, version=version,
File "django_redis/client/default.py", line 203, in get
value = client.get(key)
File "redis/client.py", line 976, in get
return self.execute_command('GET', name)
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 66, in call
return wrapped(*args, **kwargs)
File "redis/client.py", line 667, in execute_command
connection.send_command(*args)
File "redis/connection.py", line 610, in send_command
self.send_packed_command(self.pack_command(*args))
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 90, in call
port = int(instance.port) if instance.port else None
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/redis.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
34 from elasticapm.traces import capture_span, execution_context
35
36
37 class Redis3CheckMixin(object):
38 instrument_list_3 = []
39 instrument_list = []
40
41 def get_instrument_list(self):
42 try:
43 from redis import VERSION
44
45 if VERSION[0] >= 3:
46 return self.instrument_list_3
47 return self.instrument_list
48 except ImportError:
49 return self.instrument_list
50
51
52 class RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
53 name = "redis"
54
55 # no need to instrument StrictRedis in redis-py >= 3.0
56 instrument_list_3 = [("redis.client", "Redis.execute_command")]
57 instrument_list = [("redis.client", "Redis.execute_command"), ("redis.client", "StrictRedis.execute_command")]
58
59 def call(self, module, method, wrapped, instance, args, kwargs):
60 if len(args) > 0:
61 wrapped_name = str(args[0])
62 else:
63 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
64
65 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
66 return wrapped(*args, **kwargs)
67
68
69 class RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
70 name = "redis"
71
72 # BasePipeline has been renamed to Pipeline in redis-py 3
73 instrument_list_3 = [("redis.client", "Pipeline.execute")]
74 instrument_list = [("redis.client", "BasePipeline.execute")]
75
76 def call(self, module, method, wrapped, instance, args, kwargs):
77 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
78 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
79 return wrapped(*args, **kwargs)
80
81
82 class RedisConnectionInstrumentation(AbstractInstrumentedModule):
83 name = "redis"
84
85 instrument_list = (("redis.connection", "Connection.send_packed_command"),)
86
87 def call(self, module, method, wrapped, instance, args, kwargs):
88 span = execution_context.get_span()
89 if span and span.subtype == "redis":
90 port = int(instance.port) if instance.port else None
91 destination_info = {
92 "address": instance.host,
93 "port": port,
94 "service": {"name": "redis", "resource": "redis", "type": "db"},
95 }
96 span.context["destination"] = destination_info
97 return wrapped(*args, **kwargs)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py
--- a/elasticapm/instrumentation/packages/redis.py
+++ b/elasticapm/instrumentation/packages/redis.py
@@ -87,11 +87,16 @@
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
if span and span.subtype == "redis":
- port = int(instance.port) if instance.port else None
- destination_info = {
- "address": instance.host,
- "port": port,
- "service": {"name": "redis", "resource": "redis", "type": "db"},
- }
- span.context["destination"] = destination_info
+ span.context["destination"] = get_destination_info(instance)
return wrapped(*args, **kwargs)
+
+
+def get_destination_info(connection):
+ destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
+ if hasattr(connection, "port"):
+ destination_info["port"] = connection.port
+ destination_info["address"] = connection.host
+ elif hasattr(connection, "path"):
+ destination_info["port"] = None
+ destination_info["address"] = "unix://" + connection.path
+ return destination_info
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py\n--- a/elasticapm/instrumentation/packages/redis.py\n+++ b/elasticapm/instrumentation/packages/redis.py\n@@ -87,11 +87,16 @@\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n- port = int(instance.port) if instance.port else None\n- destination_info = {\n- \"address\": instance.host,\n- \"port\": port,\n- \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n- }\n- span.context[\"destination\"] = destination_info\n+ span.context[\"destination\"] = get_destination_info(instance)\n return wrapped(*args, **kwargs)\n+\n+\n+def get_destination_info(connection):\n+ destination_info = {\"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"}}\n+ if hasattr(connection, \"port\"):\n+ destination_info[\"port\"] = connection.port\n+ destination_info[\"address\"] = connection.host\n+ elif hasattr(connection, \"path\"):\n+ destination_info[\"port\"] = None\n+ destination_info[\"address\"] = \"unix://\" + connection.path\n+ return destination_info\n", "issue": "'UnixDomainSocketConnection' object has no attribute 'port'\nAttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'\r\nelastic-apm==5.5.1\r\n\r\n```\r\n File \"django_redis/cache.py\", line 32, in _decorator\r\n return method(self, *args, **kwargs)\r\n File \"django_redis/cache.py\", line 80, in get\r\n return self.client.get(key, default=default, version=version,\r\n File \"django_redis/client/default.py\", line 203, in get\r\n value = client.get(key)\r\n File \"redis/client.py\", line 976, in get\r\n return self.execute_command('GET', name)\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 66, in call\r\n return wrapped(*args, **kwargs)\r\n File \"redis/client.py\", line 667, in execute_command\r\n connection.send_command(*args)\r\n File \"redis/connection.py\", line 610, in send_command\r\n self.send_packed_command(self.pack_command(*args))\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 90, in call\r\n port = int(instance.port) if instance.port else None\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span, execution_context\n\n\nclass Redis3CheckMixin(object):\n instrument_list_3 = []\n instrument_list = []\n\n def get_instrument_list(self):\n try:\n from redis import VERSION\n\n if VERSION[0] >= 3:\n return self.instrument_list_3\n return self.instrument_list\n except ImportError:\n return self.instrument_list\n\n\nclass RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # no need to instrument StrictRedis in redis-py >= 3.0\n instrument_list_3 = [(\"redis.client\", \"Redis.execute_command\")]\n instrument_list = [(\"redis.client\", \"Redis.execute_command\"), (\"redis.client\", \"StrictRedis.execute_command\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = str(args[0])\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # BasePipeline has been renamed to Pipeline in redis-py 3\n instrument_list_3 = [(\"redis.client\", \"Pipeline.execute\")]\n instrument_list = [(\"redis.client\", \"BasePipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"redis\"\n\n instrument_list = ((\"redis.connection\", \"Connection.send_packed_command\"),)\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n port = int(instance.port) if instance.port else None\n destination_info = {\n \"address\": instance.host,\n \"port\": port,\n \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n }\n span.context[\"destination\"] = destination_info\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/redis.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above 
copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span, execution_context\n\n\nclass Redis3CheckMixin(object):\n instrument_list_3 = []\n instrument_list = []\n\n def get_instrument_list(self):\n try:\n from redis import VERSION\n\n if VERSION[0] >= 3:\n return self.instrument_list_3\n return self.instrument_list\n except ImportError:\n return self.instrument_list\n\n\nclass RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # no need to instrument StrictRedis in redis-py >= 3.0\n instrument_list_3 = [(\"redis.client\", \"Redis.execute_command\")]\n instrument_list = [(\"redis.client\", \"Redis.execute_command\"), (\"redis.client\", \"StrictRedis.execute_command\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = str(args[0])\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # BasePipeline has been renamed to Pipeline in redis-py 3\n instrument_list_3 = [(\"redis.client\", \"Pipeline.execute\")]\n instrument_list = [(\"redis.client\", \"BasePipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"redis\"\n\n instrument_list = ((\"redis.connection\", \"Connection.send_packed_command\"),)\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n span.context[\"destination\"] = get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"}}\n if hasattr(connection, \"port\"):\n destination_info[\"port\"] = connection.port\n 
destination_info[\"address\"] = connection.host\n elif hasattr(connection, \"path\"):\n destination_info[\"port\"] = None\n destination_info[\"address\"] = \"unix://\" + connection.path\n return destination_info\n", "path": "elasticapm/instrumentation/packages/redis.py"}]}
| 1,707 | 306 |
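For reference, the `get_destination_info` helper visible at the tail of the patched `redis.py` above can be exercised in isolation. A minimal sketch follows; the two connection classes are hypothetical stand-ins for redis-py's TCP and unix-socket connection objects, which the real code inspects via `hasattr`.

```python
# Standalone sketch of the destination-info logic from the patched file.
# TCPConnection / UnixConnection are hypothetical stand-ins for redis-py's
# connection objects; only the attributes the helper inspects are modeled.
class TCPConnection:
    def __init__(self, host, port):
        self.host = host
        self.port = port


class UnixConnection:
    def __init__(self, path):
        self.path = path


def get_destination_info(connection):
    destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
    if hasattr(connection, "port"):
        destination_info["port"] = connection.port
        destination_info["address"] = connection.host
    elif hasattr(connection, "path"):
        # Unix domain sockets have no port; encode the socket path instead.
        destination_info["port"] = None
        destination_info["address"] = "unix://" + connection.path
    return destination_info


assert get_destination_info(TCPConnection("localhost", 6379))["port"] == 6379
assert (
    get_destination_info(UnixConnection("/tmp/redis.sock"))["address"]
    == "unix:///tmp/redis.sock"
)
```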
gh_patches_debug_56
|
rasdani/github-patches
|
git_diff
|
Anselmoo__spectrafit-660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs]: Update release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
- Link to the complete changes of the latest release.
- Exclude auto commits in the contributor list
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrafit/__init__.py`
Content:
```
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a0"
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a0"
+__version__ = "1.0.0a1"
|
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a0\"\n+__version__ = \"1.0.0a1\"\n", "issue": "[Docs]: Update release drafter\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\n- Link to the complete changes of the latest release.\r\n- Exclude auto commits in the contributor list\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a0\"\n", "path": "spectrafit/__init__.py"}], "after_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a1\"\n", "path": "spectrafit/__init__.py"}]}
| 374 | 96 |
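The golden diff above is a one-line version bump. For illustration, release tooling commonly reads such a version string back out of `spectrafit/__init__.py` without importing the package; the helper below is a hypothetical sketch of that pattern, not part of SpectraFit itself.

```python
# Hypothetical sketch: extract __version__ from a package's __init__.py
# without importing it (avoids pulling in the package's dependencies).
import re
from pathlib import Path


def read_version(init_file: str = "spectrafit/__init__.py") -> str:
    text = Path(init_file).read_text(encoding="utf8")
    match = re.search(r'__version__\s*=\s*"([^"]+)"', text)
    if match is None:
        raise ValueError(f"no __version__ assignment found in {init_file}")
    return match.group(1)


# With the golden diff applied, this returns "1.0.0a1".
```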
gh_patches_debug_62442
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-1162
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reader study completed message is visible when study is not completed

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/reader_studies/templatetags/get_ground_truth.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def get_ground_truth(obj, image, question):
8 """Get the auth token for the user."""
9 ground_truths = obj.statistics["ground_truths"]
10 return ground_truths[image][question]
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
@@ -5,6 +5,7 @@
@register.simple_tag
def get_ground_truth(obj, image, question):
- """Get the auth token for the user."""
+ """Get the ground truth value for the image/question combination in reader
+ study obj."""
ground_truths = obj.statistics["ground_truths"]
return ground_truths[image][question]
|
{"golden_diff": "diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n@@ -5,6 +5,7 @@\n \n @register.simple_tag\n def get_ground_truth(obj, image, question):\n- \"\"\"Get the auth token for the user.\"\"\"\n+ \"\"\"Get the ground truth value for the image/question combination in reader\n+ study obj.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "issue": "Reader study completed message is visible when study is not completed\n\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_ground_truth(obj, image, question):\n \"\"\"Get the auth token for the user.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "path": "app/grandchallenge/reader_studies/templatetags/get_ground_truth.py"}], "after_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_ground_truth(obj, image, question):\n \"\"\"Get the ground truth value for the image/question combination in reader\n study obj.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "path": "app/grandchallenge/reader_studies/templatetags/get_ground_truth.py"}]}
| 438 | 159 |
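The golden diff above only corrects a copy-pasted docstring; the tag's behaviour is a plain nested-dict lookup. The standalone sketch below reproduces it with a hypothetical `FakeStudy` object standing in for a real reader-study instance.

```python
# FakeStudy is a hypothetical stand-in for a ReaderStudy model instance;
# only the `statistics` attribute the tag reads is modeled.
class FakeStudy:
    statistics = {
        "ground_truths": {
            "image-1.mha": {"Is there a lesion?": "yes"},
        }
    }


def get_ground_truth(obj, image, question):
    """Get the ground truth value for the image/question combination in
    reader study obj."""
    return obj.statistics["ground_truths"][image][question]


assert get_ground_truth(FakeStudy(), "image-1.mha", "Is there a lesion?") == "yes"
```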
gh_patches_debug_25949
|
rasdani/github-patches
|
git_diff
|
larq__larq-319
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor layer test to use pytest parameterized test
Currently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.
The only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).
I think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="larq",
11 version="0.7.4",
12 python_requires=">=3.6",
13 author="Plumerai",
14 author_email="[email protected]",
15 description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
16 long_description=readme(),
17 long_description_content_type="text/markdown",
18 url="https://larq.dev/",
19 packages=find_packages(exclude=["larq.snapshots"]),
20 license="Apache 2.0",
21 install_requires=[
22 "numpy >= 1.15.4, < 2.0",
23 "terminaltables>=3.1.0",
24 "dataclasses ; python_version<'3.7'",
25 ],
26 extras_require={
27 "tensorflow": ["tensorflow>=1.14.0"],
28 "tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
29 "test": [
30 "absl-py==0.8.1",
31 "pytest==5.2.2",
32 "pytest-cov==2.8.1",
33 "pytest-xdist==1.30.0",
34 "snapshottest==0.5.1",
35 ],
36 "docs": [
37 "mkdocs==1.0.4",
38 "mkdocs-material==4.4.3",
39 "pymdown-extensions==6.1",
40 "mknotebooks==0.1.7",
41 "mkdocs-minify-plugin==0.2.1",
42 "larq-zoo==0.4.2",
43 "altair==3.2.0",
44 "pandas==0.25.3",
45 ],
46 },
47 classifiers=[
48 "Development Status :: 4 - Beta",
49 "Intended Audience :: Developers",
50 "Intended Audience :: Education",
51 "Intended Audience :: Science/Research",
52 "License :: OSI Approved :: Apache Software License",
53 "Programming Language :: Python :: 3",
54 "Programming Language :: Python :: 3 :: Only",
55 "Programming Language :: Python :: 3.6",
56 "Programming Language :: Python :: 3.7",
57 "Topic :: Scientific/Engineering",
58 "Topic :: Scientific/Engineering :: Mathematics",
59 "Topic :: Scientific/Engineering :: Artificial Intelligence",
60 "Topic :: Software Development",
61 "Topic :: Software Development :: Libraries",
62 "Topic :: Software Development :: Libraries :: Python Modules",
63 ],
64 )
65
```
Path: `larq/conftest.py`
Content:
```
1 import pytest
2 from tensorflow.python.eager import context
3
4
5 @pytest.fixture
6 def eager_mode():
7 """pytest fixture for running test in eager mode"""
8 with context.eager_mode():
9 yield
10
11
12 @pytest.fixture
13 def graph_mode():
14 """pytest fixture for running test in graph mode"""
15 with context.graph_mode():
16 yield
17
18
19 @pytest.fixture(params=["eager", "graph"])
20 def eager_and_graph_mode(request):
21 """pytest fixture for running test in eager and graph mode"""
22 with getattr(context, f"{request.param}_mode")():
23 yield request.param
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/larq/conftest.py b/larq/conftest.py
--- a/larq/conftest.py
+++ b/larq/conftest.py
@@ -1,4 +1,5 @@
import pytest
+import tensorflow as tf
from tensorflow.python.eager import context
@@ -21,3 +22,27 @@
"""pytest fixture for running test in eager and graph mode"""
with getattr(context, f"{request.param}_mode")():
yield request.param
+
+
[email protected](params=["graph", "tf_eager", "tf_keras_eager"])
+def keras_should_run_eagerly(request):
+ """Fixture to run in graph and two eager modes.
+
+ The modes are:
+ - Graph mode
+ - TensorFlow eager and Keras eager
+ - TensorFlow eager and Keras not eager
+
+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
+ should run eagerly.
+ """
+
+ if request.param == "graph":
+ if int(tf.__version__[0]) >= 2:
+ pytest.skip("Skipping graph mode for TensorFlow 2+.")
+
+ with context.graph_mode():
+ yield
+ else:
+ with context.eager_mode():
+ yield request.param == "tf_keras_eager"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,6 @@
"tensorflow": ["tensorflow>=1.14.0"],
"tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
"test": [
- "absl-py==0.8.1",
"pytest==5.2.2",
"pytest-cov==2.8.1",
"pytest-xdist==1.30.0",
|
{"golden_diff": "diff --git a/larq/conftest.py b/larq/conftest.py\n--- a/larq/conftest.py\n+++ b/larq/conftest.py\n@@ -1,4 +1,5 @@\n import pytest\n+import tensorflow as tf\n from tensorflow.python.eager import context\n \n \n@@ -21,3 +22,27 @@\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n+\n+\[email protected](params=[\"graph\", \"tf_eager\", \"tf_keras_eager\"])\n+def keras_should_run_eagerly(request):\n+ \"\"\"Fixture to run in graph and two eager modes.\n+\n+ The modes are:\n+ - Graph mode\n+ - TensorFlow eager and Keras eager\n+ - TensorFlow eager and Keras not eager\n+\n+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras\n+ should run eagerly.\n+ \"\"\"\n+\n+ if request.param == \"graph\":\n+ if int(tf.__version__[0]) >= 2:\n+ pytest.skip(\"Skipping graph mode for TensorFlow 2+.\")\n+\n+ with context.graph_mode():\n+ yield\n+ else:\n+ with context.eager_mode():\n+ yield request.param == \"tf_keras_eager\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,6 @@\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n- \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n", "issue": "Refactor layer test to use pytest parameterized test\nCurrently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.\r\n\r\nThe only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).\r\n\r\nI think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. 
That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.7.4\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n \"snapshottest==0.5.1\",\n ],\n \"docs\": [\n \"mkdocs==1.0.4\",\n \"mkdocs-material==4.4.3\",\n \"pymdown-extensions==6.1\",\n \"mknotebooks==0.1.7\",\n \"mkdocs-minify-plugin==0.2.1\",\n \"larq-zoo==0.4.2\",\n \"altair==3.2.0\",\n \"pandas==0.25.3\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "import pytest\nfrom tensorflow.python.eager import context\n\n\[email protected]\ndef eager_mode():\n \"\"\"pytest fixture for running test in eager mode\"\"\"\n with context.eager_mode():\n yield\n\n\[email protected]\ndef graph_mode():\n \"\"\"pytest fixture for running test in graph mode\"\"\"\n with context.graph_mode():\n yield\n\n\[email protected](params=[\"eager\", \"graph\"])\ndef eager_and_graph_mode(request):\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n", "path": "larq/conftest.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.7.4\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n 
\"pytest-xdist==1.30.0\",\n \"snapshottest==0.5.1\",\n ],\n \"docs\": [\n \"mkdocs==1.0.4\",\n \"mkdocs-material==4.4.3\",\n \"pymdown-extensions==6.1\",\n \"mknotebooks==0.1.7\",\n \"mkdocs-minify-plugin==0.2.1\",\n \"larq-zoo==0.4.2\",\n \"altair==3.2.0\",\n \"pandas==0.25.3\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "import pytest\nimport tensorflow as tf\nfrom tensorflow.python.eager import context\n\n\[email protected]\ndef eager_mode():\n \"\"\"pytest fixture for running test in eager mode\"\"\"\n with context.eager_mode():\n yield\n\n\[email protected]\ndef graph_mode():\n \"\"\"pytest fixture for running test in graph mode\"\"\"\n with context.graph_mode():\n yield\n\n\[email protected](params=[\"eager\", \"graph\"])\ndef eager_and_graph_mode(request):\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n\n\[email protected](params=[\"graph\", \"tf_eager\", \"tf_keras_eager\"])\ndef keras_should_run_eagerly(request):\n \"\"\"Fixture to run in graph and two eager modes.\n\n The modes are:\n - Graph mode\n - TensorFlow eager and Keras eager\n - TensorFlow eager and Keras not eager\n\n The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras\n should run eagerly.\n \"\"\"\n\n if request.param == \"graph\":\n if int(tf.__version__[0]) >= 2:\n pytest.skip(\"Skipping graph mode for TensorFlow 2+.\")\n\n with context.graph_mode():\n yield\n else:\n with context.eager_mode():\n yield request.param == \"tf_keras_eager\"\n", "path": "larq/conftest.py"}]}
| 1,527 | 416 |
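A hypothetical usage sketch for the `keras_should_run_eagerly` fixture added in the golden diff: a pytest-parameterized layer test that runs once per Keras mode. The `Dense` layer and shapes are illustrative placeholders, not larq's real binarized layers, and the test assumes a TensorFlow version whose `Model.compile` accepts `run_eagerly`.

```python
import numpy as np
import pytest
import tensorflow as tf


@pytest.mark.parametrize("units", [1, 8])
def test_dense_layer(units, keras_should_run_eagerly):
    # The fixture has already entered graph or eager mode; it yields True
    # only for the "tf_keras_eager" parameterization.
    model = tf.keras.models.Sequential(
        [tf.keras.layers.Dense(units, input_shape=(4,))]
    )
    model.compile(optimizer="sgd", loss="mse", run_eagerly=keras_should_run_eagerly)
    x = np.zeros((8, 4), dtype="float32")
    y = np.zeros((8, units), dtype="float32")
    model.fit(x, y, epochs=1, batch_size=4, verbose=0)
```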
gh_patches_debug_7229
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Client.build crashes when trying to pull a new image if HttpHeaders are set in config file
``` python
import docker
c = docker.Client()
c.build('https://github.com/docker/compose.git')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-d78c607c9627> in <module>()
----> 1 c.build('https://github.com/docker/compose.git')
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)
102 headers=headers,
103 stream=stream,
--> 104 timeout=timeout,
105 )
106
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
AttributeError: 'NoneType' object has no attribute 'update'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/decorators.py`
Content:
```
1 import functools
2
3 from .. import errors
4 from . import utils
5
6
7 def check_resource(f):
8 @functools.wraps(f)
9 def wrapped(self, resource_id=None, *args, **kwargs):
10 if resource_id is None:
11 if kwargs.get('container'):
12 resource_id = kwargs.pop('container')
13 elif kwargs.get('image'):
14 resource_id = kwargs.pop('image')
15 if isinstance(resource_id, dict):
16 resource_id = resource_id.get('Id')
17 if not resource_id:
18 raise errors.NullResource(
19 'image or container param is undefined'
20 )
21 return f(self, resource_id, *args, **kwargs)
22 return wrapped
23
24
25 def minimum_version(version):
26 def decorator(f):
27 @functools.wraps(f)
28 def wrapper(self, *args, **kwargs):
29 if utils.version_lt(self._version, version):
30 raise errors.InvalidVersion(
31 '{0} is not available for version < {1}'.format(
32 f.__name__, version
33 )
34 )
35 return f(self, *args, **kwargs)
36 return wrapper
37 return decorator
38
39
40 def update_headers(f):
41 def inner(self, *args, **kwargs):
42 if 'HttpHeaders' in self._auth_configs:
43 if 'headers' not in kwargs:
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -40,7 +40,7 @@
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
- if 'headers' not in kwargs:
+ if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
|
{"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -40,7 +40,7 @@\n def update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n- if 'headers' not in kwargs:\n+ if not kwargs.get('headers'):\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n", "issue": "Client.build crashes when trying to pull a new image if HttpHeaders are set in config file\n``` python\n\nimport docker\n\nc = docker.Client()\nc.build('https://github.com/docker/compose.git')\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-3-d78c607c9627> in <module>()\n----> 1 c.build('https://github.com/docker/compose.git')\n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)\n 102 headers=headers,\n 103 stream=stream,\n--> 104 timeout=timeout,\n 105 )\n 106 \n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)\n 44 kwargs['headers'] = self._auth_configs['HttpHeaders']\n 45 else:\n---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n 47 return f(self, *args, **kwargs)\n 48 return inner\n\nAttributeError: 'NoneType' object has no attribute 'update'\n```\n\n", "before_files": [{"content": "import functools\n\nfrom .. import errors\nfrom . import utils\n\n\ndef check_resource(f):\n @functools.wraps(f)\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n if isinstance(resource_id, dict):\n resource_id = resource_id.get('Id')\n if not resource_id:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n\n\ndef minimum_version(version):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if utils.version_lt(self._version, version):\n raise errors.InvalidVersion(\n '{0} is not available for version < {1}'.format(\n f.__name__, version\n )\n )\n return f(self, *args, **kwargs)\n return wrapper\n return decorator\n\n\ndef update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n if 'headers' not in kwargs:\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n return f(self, *args, **kwargs)\n return inner\n", "path": "docker/utils/decorators.py"}], "after_files": [{"content": "import functools\n\nfrom .. import errors\nfrom . 
import utils\n\n\ndef check_resource(f):\n @functools.wraps(f)\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n if isinstance(resource_id, dict):\n resource_id = resource_id.get('Id')\n if not resource_id:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n\n\ndef minimum_version(version):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if utils.version_lt(self._version, version):\n raise errors.InvalidVersion(\n '{0} is not available for version < {1}'.format(\n f.__name__, version\n )\n )\n return f(self, *args, **kwargs)\n return wrapper\n return decorator\n\n\ndef update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n if not kwargs.get('headers'):\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n return f(self, *args, **kwargs)\n return inner\n", "path": "docker/utils/decorators.py"}]}
| 991 | 126 |
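The one-line fix above replaces a membership test with a truthiness check so an explicit `headers=None` no longer reaches `.update()`. The self-contained reproduction below uses a hypothetical `FakeClient` in place of `docker.Client`.

```python
def update_headers(f):
    def inner(self, *args, **kwargs):
        if 'HttpHeaders' in self._auth_configs:
            # Fixed check: also catches an explicit headers=None, which the
            # original `'headers' not in kwargs` test let through.
            if not kwargs.get('headers'):
                kwargs['headers'] = self._auth_configs['HttpHeaders']
            else:
                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
        return f(self, *args, **kwargs)
    return inner


class FakeClient:
    # Hypothetical stand-in for docker.Client with HttpHeaders configured.
    _auth_configs = {'HttpHeaders': {'User-Agent': 'custom'}}

    @update_headers
    def request(self, headers=None):
        return headers


# Before the fix this raised AttributeError: 'NoneType' has no attribute 'update'.
assert FakeClient().request(headers=None) == {'User-Agent': 'custom'}
```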
gh_patches_debug_29587
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-2198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use setuptools_scm for generating a version number
[setuptools_scm](https://github.com/pypa/setuptools_scm/) can automatically generate version numbers based on git tags and versioning information. This will allow us to exactly identify which commit a user is reporting an issue with if it is not a released version. It will also remove one step of the release process.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 import sys
32 if sys.version_info[:2] < (3, 5):
33 msg = [
34 "This version of cocotb requires at least Python 3.5,",
35 "you are running Python %d.%d.%d." % (
36 sys.version_info[0], sys.version_info[1], sys.version_info[2])
37 ]
38 if sys.version_info[0] == 2:
39 msg += [
40 "If you have Python 3 installed on your machine try ",
41 "using 'python3 -m pip' instead of 'pip' to install cocotb."
42 ]
43 msg += [
44 "For more information please refer to the documentation at ",
45 "https://cocotb.readthedocs.io."
46 ]
47
48 raise SystemExit("\n".join(msg))
49
50 import logging
51 from setuptools import setup
52 from setuptools import find_packages
53 from os import path, walk
54 from io import StringIO
55
56 # Note: cocotb is not installed properly yet and is missing dependencies and binaries
57 # We can still import other files next to setup.py, as long as they're in MANIFEST.in
58 # The below line is necessary for PEP517 support
59 sys.path.append(path.dirname(__file__))
60 from cocotb_build_libs import get_ext, build_ext
61
62
63 def read_file(fname):
64 with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:
65 return f.read()
66
67
68 def package_files(directory):
69 paths = []
70 for (fpath, directories, filenames) in walk(directory):
71 for filename in filenames:
72 paths.append(path.join('..', fpath, filename))
73 return paths
74
75
76 # this sets the __version__ variable
77 exec(read_file(path.join('cocotb', '_version.py')))
78
79 # store log from build_libs and display at the end in verbose mode
80 # see https://github.com/pypa/pip/issues/6634
81 log_stream = StringIO()
82 handler = logging.StreamHandler(log_stream)
83 log = logging.getLogger("cocotb._build_libs")
84 log.setLevel(logging.INFO)
85 log.addHandler(handler)
86
87 setup(
88 name='cocotb',
89 cmdclass={'build_ext': build_ext},
90 version=__version__, # noqa: F821
91 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
92 url='https://docs.cocotb.org',
93 license='BSD',
94 long_description=read_file('README.md'),
95 long_description_content_type='text/markdown',
96 author='Chris Higgs, Stuart Hodgson',
97 maintainer='cocotb contributors',
98 maintainer_email='[email protected]',
99 install_requires=[],
100 python_requires='>=3.5',
101 packages=find_packages(),
102 package_data={
103 'cocotb': (
104 package_files('cocotb/share/makefiles') + # noqa: W504
105 package_files('cocotb/share/include') + # noqa: W504
106 package_files('cocotb/share/def') +
107 package_files('cocotb/share/lib/verilator')
108 )
109 },
110 ext_modules=get_ext(),
111 entry_points={
112 'console_scripts': [
113 'cocotb-config=cocotb.config:main',
114 ]
115 },
116 platforms='any',
117 classifiers=[
118 "Programming Language :: Python :: 3",
119 "Programming Language :: Python :: 3.5",
120 "Programming Language :: Python :: 3.6",
121 "Programming Language :: Python :: 3.7",
122 "Programming Language :: Python :: 3.8",
123 "Programming Language :: Python :: 3.9",
124 "License :: OSI Approved :: BSD License",
125 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
126 ],
127
128 # these appear in the sidebar on PyPI
129 project_urls={
130 "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
131 "Source Code": "https://github.com/cocotb/cocotb",
132 "Documentation": "https://docs.cocotb.org",
133 },
134 )
135
136 print(log_stream.getvalue())
137
```
Path: `cocotb/_version.py`
Content:
```
1 # Package versioning solution originally found here:
2 # http://stackoverflow.com/q/458550
3
4 # Store the version here so:
5 # 1) we don't load dependencies by storing it in __init__.py
6 # 2) we can import it in setup.py for the same reason
7 # 3) we can import it into your module
8 __version__ = '1.5.0.dev0'
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cocotb/_version.py b/cocotb/_version.py
deleted file mode 100644
--- a/cocotb/_version.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Package versioning solution originally found here:
-# http://stackoverflow.com/q/458550
-
-# Store the version here so:
-# 1) we don't load dependencies by storing it in __init__.py
-# 2) we can import it in setup.py for the same reason
-# 3) we can import it into your module
-__version__ = '1.5.0.dev0'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -73,9 +73,6 @@
return paths
-# this sets the __version__ variable
-exec(read_file(path.join('cocotb', '_version.py')))
-
# store log from build_libs and display at the end in verbose mode
# see https://github.com/pypa/pip/issues/6634
log_stream = StringIO()
@@ -87,7 +84,11 @@
setup(
name='cocotb',
cmdclass={'build_ext': build_ext},
- version=__version__, # noqa: F821
+ use_scm_version=dict(
+ write_to='cocotb/_version.py',
+ write_to_template='__version__ = {version!r}',
+ version_scheme='release-branch-semver'
+ ),
description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
url='https://docs.cocotb.org',
license='BSD',
@@ -96,6 +97,7 @@
author='Chris Higgs, Stuart Hodgson',
maintainer='cocotb contributors',
maintainer_email='[email protected]',
+ setup_requires=['setuptools_scm'],
install_requires=[],
python_requires='>=3.5',
packages=find_packages(),
|
{"golden_diff": "diff --git a/cocotb/_version.py b/cocotb/_version.py\ndeleted file mode 100644\n--- a/cocotb/_version.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-# Package versioning solution originally found here:\n-# http://stackoverflow.com/q/458550\n-\n-# Store the version here so:\n-# 1) we don't load dependencies by storing it in __init__.py\n-# 2) we can import it in setup.py for the same reason\n-# 3) we can import it into your module\n-__version__ = '1.5.0.dev0'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -73,9 +73,6 @@\n return paths\n \n \n-# this sets the __version__ variable\n-exec(read_file(path.join('cocotb', '_version.py')))\n-\n # store log from build_libs and display at the end in verbose mode\n # see https://github.com/pypa/pip/issues/6634\n log_stream = StringIO()\n@@ -87,7 +84,11 @@\n setup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n- version=__version__, # noqa: F821\n+ use_scm_version=dict(\n+ write_to='cocotb/_version.py',\n+ write_to_template='__version__ = {version!r}',\n+ version_scheme='release-branch-semver'\n+ ),\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n@@ -96,6 +97,7 @@\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n+ setup_requires=['setuptools_scm'],\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n", "issue": "Use setuptools_scm for generating a version number\n[setuptools_scm](https://github.com/pypa/setuptools_scm/) can automatically generate version numbers based on git tags and versioning information. This will allow us to exactly identify which commit a user is reporting an issue with if it is not a released version. It will also remove one step of the release process.\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\n# The below line is necessary for PEP517 support\nsys.path.append(path.dirname(__file__))\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n package_data={\n 'cocotb': (\n package_files('cocotb/share/makefiles') + # noqa: W504\n package_files('cocotb/share/include') + # noqa: W504\n package_files('cocotb/share/def') +\n package_files('cocotb/share/lib/verilator')\n )\n },\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: 
BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}, {"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = '1.5.0.dev0'\n", "path": "cocotb/_version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\n# The below line is necessary for PEP517 support\nsys.path.append(path.dirname(__file__))\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n use_scm_version=dict(\n write_to='cocotb/_version.py',\n write_to_template='__version__ = {version!r}',\n version_scheme='release-branch-semver'\n ),\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n setup_requires=['setuptools_scm'],\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n package_data={\n 'cocotb': (\n package_files('cocotb/share/makefiles') + # noqa: W504\n package_files('cocotb/share/include') + # noqa: W504\n package_files('cocotb/share/def') +\n package_files('cocotb/share/lib/verilator')\n )\n },\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming 
Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}, {"content": null, "path": "cocotb/_version.py"}]}
| 2,000 | 469 |
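As a complement to the `use_scm_version` configuration added in the golden diff, the SCM-derived version can also be queried directly. The sketch below assumes setuptools_scm is installed and is run inside a git checkout with at least one release tag.

```python
# Hypothetical query of the version setuptools_scm would write to
# cocotb/_version.py, using the same version scheme as the setup() call.
from setuptools_scm import get_version

version = get_version(root=".", version_scheme="release-branch-semver")
# On a tagged commit this is e.g. "1.5.0"; on later commits it is a dev
# version such as "1.5.1.dev3+g1234abc".
print(version)
```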
gh_patches_debug_13544
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-93
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change help for az webapp new
### Extension name (the extension in question)
webapp
### Description of issue (in as much detail as possible)
currently the help information for the command returns the
Command
az webapp new: Create and deploy a node web app.
Requested changes:
- add the callout for .net core apps
- add explicit mention that the command is in preview
-----
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webapp/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.1.0"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 setup(
30 name='webapp',
31 version=VERSION,
32 description='An Azure CLI Extension to manage appservice resources',
33 long_description='An Azure CLI Extension to manage appservice resources',
34 license='MIT',
35 author='Sisira Panchagnula',
36 author_email='[email protected]',
37 url='https://github.com/Azure/azure-cli-extensions',
38 classifiers=CLASSIFIERS,
39 packages=find_packages(exclude=["tests"]),
40 install_requires=DEPENDENCIES
41 )
42
```
Path: `src/webapp/azext_webapp/_help.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from knack.help_files import helps
7
8
9 helps['webapp new'] = """
10 type: command
11 short-summary: Create and deploy a node web app
12 examples:
13 - name: Create a web app with the default configuration.
14 text: >
15 az webapp new -n MyUniqueAppName --dryrun \n
16 az webapp new -n MyUniqueAppName -l locationName
17 """
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py
--- a/src/webapp/azext_webapp/_help.py
+++ b/src/webapp/azext_webapp/_help.py
@@ -8,7 +8,8 @@
helps['webapp new'] = """
type: command
- short-summary: Create and deploy a node web app
+ short-summary: Experimental command to create and deploy a web app.
+ Current supports Node on Linux & .NET Core on Windows.
examples:
- name: Create a web app with the default configuration.
text: >
diff --git a/src/webapp/setup.py b/src/webapp/setup.py
--- a/src/webapp/setup.py
+++ b/src/webapp/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.1.0"
+VERSION = "0.1.1"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
|
{"golden_diff": "diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py\n--- a/src/webapp/azext_webapp/_help.py\n+++ b/src/webapp/azext_webapp/_help.py\n@@ -8,7 +8,8 @@\n \n helps['webapp new'] = \"\"\"\n type: command\n- short-summary: Create and deploy a node web app\n+ short-summary: Experimental command to create and deploy a web app.\n+ Current supports Node on Linux & .NET Core on Windows.\n examples:\n - name: Create a web app with the default configuration.\n text: >\ndiff --git a/src/webapp/setup.py b/src/webapp/setup.py\n--- a/src/webapp/setup.py\n+++ b/src/webapp/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.0\"\n+VERSION = \"0.1.1\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Change help for az webapp new\n### Extension name (the extension in question)\r\nwebapp\r\n\r\n### Description of issue (in as much detail as possible)\r\ncurrently the help information for the command return the \r\n\r\nCommand\r\n az webapp new: Create and deploy a node web app.\r\n\r\nRequested changes:\r\n\r\n- add the callout for .net core apps\r\n- add explicit mention that the command is in preview\r\n\r\n-----\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.0\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n\nhelps['webapp new'] = \"\"\"\n type: command\n short-summary: Create and deploy a node web app\n examples:\n - name: Create a web app with the default configuration.\n text: >\n az webapp new -n MyUniqueAppName --dryrun \\n\n az webapp new -n MyUniqueAppName -l locationName\n\"\"\"\n", "path": "src/webapp/azext_webapp/_help.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n\nhelps['webapp new'] = \"\"\"\n type: command\n short-summary: Experimental command to create and deploy a web app.\n Current supports Node on Linux & .NET Core on Windows.\n examples:\n - name: Create a web app with the default configuration.\n text: >\n az webapp new -n MyUniqueAppName --dryrun \\n\n az webapp new -n MyUniqueAppName -l locationName\n\"\"\"\n", "path": "src/webapp/azext_webapp/_help.py"}]}
| 898 | 233 |
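The webapp patch above pairs a user-facing help-text change with a patch-level version bump so the updated extension can be republished. A minimal sketch of the knack-style help registration it edits, mirroring the `from knack.help_files import helps` pattern in `_help.py`; the command group `mygroup mycmd` and the example invocation are hypothetical, not part of the azure-cli webapp extension:

```python
# Minimal sketch of knack-style help registration; the command group and
# example below are hypothetical placeholders.
from knack.help_files import helps

helps['mygroup mycmd'] = """
    type: command
    short-summary: Experimental command; state supported platforms here.
    examples:
        - name: Run with the default configuration.
          text: >
            az mygroup mycmd -n MyName --dryrun
"""
```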
gh_patches_debug_23257
|
rasdani/github-patches
|
git_diff
|
getredash__redash-2799
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add front-end extension capability
This is similar in concept to https://github.com/getredash/redash/pull/2354/files
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/extensions.py`
Content:
```
1 from pkg_resources import iter_entry_points
2
3
4 def init_extensions(app):
5 """
6 Load the Redash extensions for the given Redash Flask app.
7 """
8 if not hasattr(app, 'redash_extensions'):
9 app.redash_extensions = {}
10
11 for entry_point in iter_entry_points('redash.extensions'):
12 app.logger.info('Loading Redash extension %s.', entry_point.name)
13 extension = entry_point.load()
14 app.redash_extensions[entry_point.name] = extension(app)
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/extensions.py b/redash/extensions.py
--- a/redash/extensions.py
+++ b/redash/extensions.py
@@ -1,4 +1,5 @@
-from pkg_resources import iter_entry_points
+import os
+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir
def init_extensions(app):
@@ -10,5 +11,20 @@
for entry_point in iter_entry_points('redash.extensions'):
app.logger.info('Loading Redash extension %s.', entry_point.name)
- extension = entry_point.load()
- app.redash_extensions[entry_point.name] = extension(app)
+ try:
+ extension = entry_point.load()
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": extension(app),
+ "resources_list": []
+ }
+ except ImportError:
+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)
+ (root_module, _) = os.path.splitext(entry_point.module_name)
+ content_folder_relative = os.path.join(entry_point.name, 'bundle')
+
+ # If it's a frontend extension only, store a list of files in the bundle directory.
+ if resource_isdir(root_module, content_folder_relative):
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": None,
+ "resources_list": resource_listdir(root_module, content_folder_relative)
+ }
|
{"golden_diff": "diff --git a/redash/extensions.py b/redash/extensions.py\n--- a/redash/extensions.py\n+++ b/redash/extensions.py\n@@ -1,4 +1,5 @@\n-from pkg_resources import iter_entry_points\n+import os\n+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir\n \n \n def init_extensions(app):\n@@ -10,5 +11,20 @@\n \n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n- extension = entry_point.load()\n- app.redash_extensions[entry_point.name] = extension(app)\n+ try:\n+ extension = entry_point.load()\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": extension(app),\n+ \"resources_list\": []\n+ }\n+ except ImportError:\n+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)\n+ (root_module, _) = os.path.splitext(entry_point.module_name)\n+ content_folder_relative = os.path.join(entry_point.name, 'bundle')\n+\n+ # If it's a frontend extension only, store a list of files in the bundle directory.\n+ if resource_isdir(root_module, content_folder_relative):\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": None,\n+ \"resources_list\": resource_listdir(root_module, content_folder_relative)\n+ }\n", "issue": "Add front-end extension capability\nThis is similar in concept to https://github.com/getredash/redash/pull/2354/files\n", "before_files": [{"content": "from pkg_resources import iter_entry_points\n\n\ndef init_extensions(app):\n \"\"\"\n Load the Redash extensions for the given Redash Flask app.\n \"\"\"\n if not hasattr(app, 'redash_extensions'):\n app.redash_extensions = {}\n\n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n extension = entry_point.load()\n app.redash_extensions[entry_point.name] = extension(app)\n", "path": "redash/extensions.py"}], "after_files": [{"content": "import os\nfrom pkg_resources import iter_entry_points, resource_isdir, resource_listdir\n\n\ndef init_extensions(app):\n \"\"\"\n Load the Redash extensions for the given Redash Flask app.\n \"\"\"\n if not hasattr(app, 'redash_extensions'):\n app.redash_extensions = {}\n\n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n try:\n extension = entry_point.load()\n app.redash_extensions[entry_point.name] = {\n \"entry_function\": extension(app),\n \"resources_list\": []\n }\n except ImportError:\n app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)\n (root_module, _) = os.path.splitext(entry_point.module_name)\n content_folder_relative = os.path.join(entry_point.name, 'bundle')\n\n # If it's a frontend extension only, store a list of files in the bundle directory.\n if resource_isdir(root_module, content_folder_relative):\n app.redash_extensions[entry_point.name] = {\n \"entry_function\": None,\n \"resources_list\": resource_listdir(root_module, content_folder_relative)\n }\n", "path": "redash/extensions.py"}]}
| 411 | 317 |
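The patched `init_extensions()` above distinguishes backend extensions (a loadable callable) from frontend-only bundles (static files under `<entry point name>/bundle/`). A sketch of how an extension package might advertise itself through the `redash.extensions` entry-point group, assuming standard setuptools conventions; the package, module, and entry-point names are illustrative, not taken from the Redash docs:

```python
# Hypothetical setup.py for a Redash extension; all names are illustrative.
from setuptools import setup, find_packages

setup(
    name='redash-example-extension',
    version='0.1.0',
    packages=find_packages(),
    include_package_data=True,  # ships <entry point name>/bundle/* files
    entry_points={
        'redash.extensions': [
            # init_extensions() iterates this group; a frontend-only
            # extension can point at a module that has no callable.
            'example_extension = redash_example_extension.extension',
        ],
    },
)
```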
gh_patches_debug_17786
|
rasdani/github-patches
|
git_diff
|
lmfit__lmfit-py-152
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot deploy to PyPI repo due to tuples in the `setup.py` attributes
Due to a Python bug (http://bugs.python.org/issue19610) I cannot install and deploy lmfit with `python setup install`
I discovered this issue while trying to fix #149
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # from distutils.core import setup
3 from setuptools import setup
4
5 import lmfit as lmfit
6 import numpy, scipy
7
8 long_desc = """A library for least-squares minimization and data fitting in
9 Python. Built on top of scipy.optimize, lmfit provides a Parameter object
10 which can be set as fixed or free, can have upper and/or lower bounds, or
11 can be written in terms of algebraic constraints of other Parameters. The
12 user writes a function to be minimized as a function of these Parameters,
13 and the scipy.optimize methods are used to find the optimal values for the
14 Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
15 algorithm, and provides estimated standard errors and correlations between
16 varied Parameters. Other minimization methods, including Nelder-Mead's
17 downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
18 others are also supported. Bounds and contraints can be placed on
19 Parameters for all of these methods.
20
21 In addition, methods for explicitly calculating confidence intervals are
22 provided for exploring minmization problems where the approximation of
23 estimating Parameter uncertainties from the covariance matrix is
24 questionable. """
25
26
27 setup(name = 'lmfit',
28 version = lmfit.__version__,
29 author = 'LMFit Development Team',
30 author_email = '[email protected]',
31 url = 'http://lmfit.github.io/lmfit-py/',
32 download_url = 'http://lmfit.github.io//lmfit-py/',
33 requires = ('numpy', 'scipy'),
34 license = 'BSD',
35 description = "Least-Squares Minimization with Bounds and Constraints",
36 long_description = long_desc,
37 platforms = ('Windows', 'Linux', 'Mac OS X'),
38 classifiers=['Intended Audience :: Science/Research',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python',
41 'Topic :: Scientific/Engineering',
42 ],
43 # test_suite='nose.collector',
44 # test_requires=['Nose'],
45 package_dir = {'lmfit': 'lmfit'},
46 packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
47 )
48
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,11 @@
author_email = '[email protected]',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- requires = ('numpy', 'scipy'),
+ requires = ['numpy', 'scipy'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
- platforms = ('Windows', 'Linux', 'Mac OS X'),
+ platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,11 @@\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n- requires = ('numpy', 'scipy'),\n+ requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n- platforms = ('Windows', 'Linux', 'Mac OS X'),\n+ platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n", "issue": "Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes\nDue to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`\n\nI discovered this issue while trying to fix #149 \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ('numpy', 'scipy'),\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ('Windows', 'Linux', 'Mac OS X'),\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. 
The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}]}
| 878 | 185 |
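The distutils bug behind this record (http://bugs.python.org/issue19610) concerns metadata fields that are expected to be strings or lists; passing tuples can break `python setup.py install`. A minimal sketch of the corrected call shape, with a placeholder package name rather than lmfit's real metadata:

```python
# Minimal sketch of the corrected metadata shape; the package name is a
# placeholder. List-valued fields such as `requires` and `platforms`
# should be lists, not tuples.
from setuptools import setup

setup(
    name='example-package',
    version='0.1.0',
    requires=['numpy', 'scipy'],                 # was ('numpy', 'scipy')
    platforms=['Windows', 'Linux', 'Mac OS X'],  # was a tuple as well
)
```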
gh_patches_debug_8538
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-1768
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
widget.CheckUpdates not hidden when no updates are available
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
The documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).
This doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.
# Qtile version
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
0.15.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/check_updates.py`
Content:
```
1 # Copyright (c) 2015 Ali Mousavi
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import os
22 from subprocess import CalledProcessError, Popen
23
24 from libqtile.log_utils import logger
25 from libqtile.widget import base
26
27
28 class CheckUpdates(base.ThreadedPollText):
29 """Shows number of pending updates in different unix systems"""
30 orientations = base.ORIENTATION_HORIZONTAL
31 defaults = [
32 ("distro", "Arch", "Name of your distribution"),
33 ("custom_command", None, "Custom shell command for checking updates (counts the lines of the output)"),
34 ("update_interval", 60, "Update interval in seconds."),
35 ('execute', None, 'Command to execute on click'),
36 ("display_format", "Updates: {updates}", "Display format if updates available"),
37 ("colour_no_updates", "ffffff", "Colour when there's no updates."),
38 ("colour_have_updates", "ffffff", "Colour when there are updates."),
39 ("restart_indicator", "", "Indicator to represent reboot is required. (Ubuntu only)")
40 ]
41
42 def __init__(self, **config):
43 base.ThreadedPollText.__init__(self, **config)
44 self.add_defaults(CheckUpdates.defaults)
45
46 # format: "Distro": ("cmd", "number of lines to subtract from output")
47 self.cmd_dict = {"Arch": ("pacman -Qu", 0),
48 "Arch_checkupdates": ("checkupdates", 0),
49 "Arch_Sup": ("pacman -Sup", 1),
50 "Arch_yay": ("yay -Qu", 0),
51 "Debian": ("apt-show-versions -u -b", 0),
52 "Ubuntu": ("aptitude search ~U", 0),
53 "Fedora": ("dnf list updates", 3),
54 "FreeBSD": ("pkg_version -I -l '<'", 0),
55 "Mandriva": ("urpmq --auto-select", 0)
56 }
57
58 # Check if distro name is valid.
59 try:
60 self.cmd = self.cmd_dict[self.distro][0].split()
61 self.subtr = self.cmd_dict[self.distro][1]
62 except KeyError:
63 distros = sorted(self.cmd_dict.keys())
64 logger.error(self.distro + ' is not a valid distro name. ' +
65 'Use one of the list: ' + str(distros) + '.')
66 self.cmd = None
67
68 def _check_updates(self):
69 # type: () -> str
70 try:
71 if self.custom_command is None:
72 updates = self.call_process(self.cmd)
73 else:
74 updates = self.call_process(self.custom_command, shell=True)
75 self.subtr = 0
76 except CalledProcessError:
77 updates = ""
78 num_updates = str(len(updates.splitlines()) - self.subtr)
79
80 if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
81 num_updates += self.restart_indicator
82
83 self._set_colour(num_updates)
84 return self.display_format.format(**{"updates": num_updates})
85
86 def _set_colour(self, num_updates):
87 # type: (str) -> None
88 if not num_updates.startswith("0"):
89 self.layout.colour = self.colour_have_updates
90 else:
91 self.layout.colour = self.colour_no_updates
92
93 def poll(self):
94 # type: () -> str
95 if not self.cmd:
96 return "N/A"
97 return self._check_updates()
98
99 def button_press(self, x, y, button):
100 # type: (int, int, int) -> None
101 base.ThreadedPollText.button_press(self, x, y, button)
102 if button == 1 and self.execute is not None:
103 Popen(self.execute, shell=True)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py
--- a/libqtile/widget/check_updates.py
+++ b/libqtile/widget/check_updates.py
@@ -75,7 +75,11 @@
self.subtr = 0
except CalledProcessError:
updates = ""
- num_updates = str(len(updates.splitlines()) - self.subtr)
+ num_updates = len(updates.splitlines()) - self.subtr
+
+ if num_updates == 0:
+ return ""
+ num_updates = str(num_updates)
if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
num_updates += self.restart_indicator
|
{"golden_diff": "diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py\n--- a/libqtile/widget/check_updates.py\n+++ b/libqtile/widget/check_updates.py\n@@ -75,7 +75,11 @@\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n- num_updates = str(len(updates.splitlines()) - self.subtr)\n+ num_updates = len(updates.splitlines()) - self.subtr\n+\n+ if num_updates == 0:\n+ return \"\"\n+ num_updates = str(num_updates)\n \n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n", "issue": "widget.CheckUpdates not hidden when no updates are available\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\nThe documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).\r\n\r\nThis doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n0.15.1\n", "before_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadedPollText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\"custom_command\", None, \"Custom shell command for checking updates (counts the lines of the output)\"),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n ('execute', None, 'Command to execute on click'),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. (Ubuntu only)\")\n ]\n\n def __init__(self, **config):\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 1),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates\", 3),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0].split()\n self.subtr = self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(self.distro + ' is not a valid distro name. 
' +\n 'Use one of the list: ' + str(distros) + '.')\n self.cmd = None\n\n def _check_updates(self):\n # type: () -> str\n try:\n if self.custom_command is None:\n updates = self.call_process(self.cmd)\n else:\n updates = self.call_process(self.custom_command, shell=True)\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n num_updates = str(len(updates.splitlines()) - self.subtr)\n\n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n\n self._set_colour(num_updates)\n return self.display_format.format(**{\"updates\": num_updates})\n\n def _set_colour(self, num_updates):\n # type: (str) -> None\n if not num_updates.startswith(\"0\"):\n self.layout.colour = self.colour_have_updates\n else:\n self.layout.colour = self.colour_no_updates\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def button_press(self, x, y, button):\n # type: (int, int, int) -> None\n base.ThreadedPollText.button_press(self, x, y, button)\n if button == 1 and self.execute is not None:\n Popen(self.execute, shell=True)\n", "path": "libqtile/widget/check_updates.py"}], "after_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadedPollText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\"custom_command\", None, \"Custom shell command for checking updates (counts the lines of the output)\"),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n ('execute', None, 'Command to execute on click'),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. 
(Ubuntu only)\")\n ]\n\n def __init__(self, **config):\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 1),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates\", 3),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0].split()\n self.subtr = self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(self.distro + ' is not a valid distro name. ' +\n 'Use one of the list: ' + str(distros) + '.')\n self.cmd = None\n\n def _check_updates(self):\n # type: () -> str\n try:\n if self.custom_command is None:\n updates = self.call_process(self.cmd)\n else:\n updates = self.call_process(self.custom_command, shell=True)\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n num_updates = len(updates.splitlines()) - self.subtr\n\n if num_updates == 0:\n return \"\"\n num_updates = str(num_updates)\n\n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n\n self._set_colour(num_updates)\n return self.display_format.format(**{\"updates\": num_updates})\n\n def _set_colour(self, num_updates):\n # type: (str) -> None\n if not num_updates.startswith(\"0\"):\n self.layout.colour = self.colour_have_updates\n else:\n self.layout.colour = self.colour_no_updates\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def button_press(self, x, y, button):\n # type: (int, int, int) -> None\n base.ThreadedPollText.button_press(self, x, y, button)\n if button == 1 and self.execute is not None:\n Popen(self.execute, shell=True)\n", "path": "libqtile/widget/check_updates.py"}]}
| 1,723 | 152 |
gh_patches_debug_2136
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1752
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting a collection doesn't delete access_control_entries for its children
`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/collections.py`
Content:
```
1 import colander
2 from pyramid.events import subscriber
3
4 from kinto.core import resource, utils
5 from kinto.core.events import ResourceChanged, ACTIONS
6 from kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping
7
8
9 class CollectionSchema(resource.ResourceSchema):
10 schema = JSONSchemaMapping(missing=colander.drop)
11 cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)
12
13
14 @resource.register(name='collection',
15 collection_path='/buckets/{{bucket_id}}/collections',
16 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
17 class Collection(resource.ShareableResource):
18 schema = CollectionSchema
19 permissions = ('read', 'write', 'record:create')
20
21 def get_parent_id(self, request):
22 bucket_id = request.matchdict['bucket_id']
23 parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
24 return parent_id
25
26 def process_record(self, new, old=None):
27 """Additional collection schema validation from bucket, if any."""
28 new = super().process_record(new, old)
29
30 # Remove internal and auto-assigned fields.
31 internal_fields = (self.model.id_field,
32 self.model.modified_field,
33 self.model.permissions_field)
34 validate_from_bucket_schema_or_400(new, resource_name="collection", request=self.request,
35 ignore_fields=internal_fields)
36 return new
37
38
39 @subscriber(ResourceChanged,
40 for_resources=('collection',),
41 for_actions=(ACTIONS.DELETE,))
42 def on_collections_deleted(event):
43 """Some collections were deleted, delete records.
44 """
45 storage = event.request.registry.storage
46 permission = event.request.registry.permission
47
48 for change in event.impacted_records:
49 collection = change['old']
50 bucket_id = event.payload['bucket_id']
51 parent_id = utils.instance_uri(event.request, 'collection',
52 bucket_id=bucket_id,
53 id=collection['id'])
54 storage.delete_all(collection_id=None,
55 parent_id=parent_id,
56 with_deleted=False)
57 storage.purge_deleted(collection_id=None,
58 parent_id=parent_id)
59 permission.delete_object_permissions(parent_id)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -56,4 +56,4 @@
with_deleted=False)
storage.purge_deleted(collection_id=None,
parent_id=parent_id)
- permission.delete_object_permissions(parent_id)
+ permission.delete_object_permissions(parent_id + '/*')
|
{"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -56,4 +56,4 @@\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n- permission.delete_object_permissions(parent_id)\n+ permission.delete_object_permissions(parent_id + '/*')\n", "issue": "Deleting a collection doesn't delete access_control_entries for its children\n`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.\n", "before_files": [{"content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id)\n", "path": "kinto/views/collections.py"}], "after_files": [{"content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n 
collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id + '/*')\n", "path": "kinto/views/collections.py"}]}
| 923 | 90 |
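The one-character change works because Kinto's permission backend treats a trailing `/*` as a prefix pattern over object URIs, so the children's access control entries are swept along with the collection's own, matching what the bucket-deletion listener already does. A sketch of the difference, assuming that matching behavior; the bucket/collection/record ids are made up:

```python
# Illustrative sketch, assuming the permission backend expands a trailing
# '/*' to match descendant object ids; the URIs below are made up.
def delete_collection_acls(permission_backend, bucket_id, collection_id):
    parent_id = '/buckets/{}/collections/{}'.format(bucket_id, collection_id)
    # Without '/*': only the collection's own ACEs are removed.
    # With '/*': children such as
    #   /buckets/blog/collections/articles/records/abc123
    # lose their ACEs too.
    permission_backend.delete_object_permissions(parent_id + '/*')
```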
gh_patches_debug_1396
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-5366
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
General: v0.33.0 pip install fails
In a fresh Python v2.7.12 virtualenv on linux:
```
pip install google-cloud
```
Results in:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-3_n60m/google-cloud/setup.py", line 22, in <module>
with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'
```
Note:
```
pip install google-cloud==0.32.0
```
works fine.
I believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `legacy/google-cloud/setup.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import logging
17
18 from setuptools import setup
19
20 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
23 README = file_obj.read()
24
25 # NOTE: This is duplicated throughout and we should try to
26 # consolidate.
27 SETUP_BASE = {
28 'author': 'Google Cloud Platform',
29 'author_email': '[email protected]',
30 'scripts': [],
31 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
32 'license': 'Apache 2.0',
33 'platforms': 'Posix; MacOS X; Windows',
34 'include_package_data': True,
35 'zip_safe': False,
36 'classifiers': [
37 'Development Status :: 7 - Inactive',
38 'Intended Audience :: Developers',
39 'License :: OSI Approved :: Apache Software License',
40 'Operating System :: OS Independent',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Topic :: Internet',
48 ],
49 }
50
51 REQUIREMENTS = [
52 'google-api-core >= 0.1.2, < 0.2.0dev',
53 'google-cloud-bigquery >= 0.28.0, < 0.29dev',
54 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',
55 'google-cloud-bigtable >= 0.28.1, < 0.29dev',
56 'google-cloud-container >= 0.1.0, < 0.2dev',
57 'google-cloud-core >= 0.28.0, < 0.29dev',
58 'google-cloud-datastore >= 1.4.0, < 1.5dev',
59 'google-cloud-dns >= 0.28.0, < 0.29dev',
60 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',
61 'google-cloud-firestore >= 0.28.0, < 0.29dev',
62 'google-cloud-language >= 1.0.0, < 1.1dev',
63 'google-cloud-logging >= 1.4.0, < 1.5dev',
64 'google-cloud-monitoring >= 0.28.0, < 0.29dev',
65 'google-cloud-pubsub >= 0.30.0, < 0.31dev',
66 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',
67 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',
68 'google-cloud-spanner >= 0.29.0, < 0.30dev',
69 'google-cloud-speech >= 0.30.0, < 0.31dev',
70 'google-cloud-storage >= 1.6.0, < 1.7dev',
71 'google-cloud-trace >= 0.17.0, < 0.18dev',
72 'google-cloud-translate >= 1.3.0, < 1.4dev',
73 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',
74 'google-cloud-vision >= 0.29.0, < 0.30dev',
75 ]
76
77 setup(
78 name='google-cloud',
79 version='0.33.0',
80 description='API Client library for Google Cloud',
81 long_description=README,
82 install_requires=REQUIREMENTS,
83 **SETUP_BASE
84 )
85
86 warning = "WARNING: The google-cloud Python package is deprecated. On " \
87 "June 18, 2018, this package will no longer install any other " \
88 "packages. Please install the product-specific google-cloud-* " \
89 "packages needed for your application. See " \
90 "https://github.com/GoogleCloudPlatform/google-cloud-python."
91
92 logging.warn(warning)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py
--- a/legacy/google-cloud/setup.py
+++ b/legacy/google-cloud/setup.py
@@ -76,7 +76,7 @@
setup(
name='google-cloud',
- version='0.33.0',
+ version='0.33.1',
description='API Client library for Google Cloud',
long_description=README,
install_requires=REQUIREMENTS,
|
{"golden_diff": "diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py\n--- a/legacy/google-cloud/setup.py\n+++ b/legacy/google-cloud/setup.py\n@@ -76,7 +76,7 @@\n \n setup(\n name='google-cloud',\n- version='0.33.0',\n+ version='0.33.1',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n", "issue": "General: v0.33.0 pip install fails\nIn a fresh Python v2.7.12 virtualenv on linux:\r\n```\r\npip install google-cloud\r\n```\r\n\r\nResults in:\r\n```\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-install-3_n60m/google-cloud/setup.py\", line 22, in <module>\r\n with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\r\n IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'\r\n```\r\n\r\nNote:\r\n```\r\npip install google-cloud==0.32.0\r\n```\r\nworks fine.\r\n\r\nI believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom setuptools import setup\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\nREQUIREMENTS = [\n 'google-api-core >= 0.1.2, < 0.2.0dev',\n 'google-cloud-bigquery >= 0.28.0, < 0.29dev',\n 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',\n 'google-cloud-bigtable >= 0.28.1, < 0.29dev',\n 'google-cloud-container >= 0.1.0, < 0.2dev',\n 'google-cloud-core >= 0.28.0, < 0.29dev',\n 'google-cloud-datastore >= 1.4.0, < 1.5dev',\n 'google-cloud-dns >= 0.28.0, < 0.29dev',\n 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',\n 'google-cloud-firestore >= 0.28.0, < 0.29dev',\n 'google-cloud-language >= 1.0.0, < 1.1dev',\n 'google-cloud-logging >= 1.4.0, < 1.5dev',\n 'google-cloud-monitoring >= 0.28.0, < 0.29dev',\n 'google-cloud-pubsub >= 0.30.0, < 0.31dev',\n 'google-cloud-resource-manager >= 
0.28.0, < 0.29dev',\n 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',\n 'google-cloud-spanner >= 0.29.0, < 0.30dev',\n 'google-cloud-speech >= 0.30.0, < 0.31dev',\n 'google-cloud-storage >= 1.6.0, < 1.7dev',\n 'google-cloud-trace >= 0.17.0, < 0.18dev',\n 'google-cloud-translate >= 1.3.0, < 1.4dev',\n 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',\n 'google-cloud-vision >= 0.29.0, < 0.30dev',\n]\n\nsetup(\n name='google-cloud',\n version='0.33.0',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n\nwarning = \"WARNING: The google-cloud Python package is deprecated. On \" \\\n \"June 18, 2018, this package will no longer install any other \" \\\n \"packages. Please install the product-specific google-cloud-* \" \\\n \"packages needed for your application. See \" \\\n \"https://github.com/GoogleCloudPlatform/google-cloud-python.\"\n\nlogging.warn(warning)\n", "path": "legacy/google-cloud/setup.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom setuptools import setup\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\nREQUIREMENTS = [\n 'google-api-core >= 0.1.2, < 0.2.0dev',\n 'google-cloud-bigquery >= 0.28.0, < 0.29dev',\n 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',\n 'google-cloud-bigtable >= 0.28.1, < 0.29dev',\n 'google-cloud-container >= 0.1.0, < 0.2dev',\n 'google-cloud-core >= 0.28.0, < 0.29dev',\n 'google-cloud-datastore >= 1.4.0, < 1.5dev',\n 'google-cloud-dns >= 0.28.0, < 0.29dev',\n 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',\n 'google-cloud-firestore >= 0.28.0, < 0.29dev',\n 'google-cloud-language >= 1.0.0, < 1.1dev',\n 'google-cloud-logging >= 1.4.0, < 1.5dev',\n 'google-cloud-monitoring >= 0.28.0, < 0.29dev',\n 'google-cloud-pubsub >= 0.30.0, < 0.31dev',\n 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',\n 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',\n 'google-cloud-spanner >= 0.29.0, < 0.30dev',\n 
'google-cloud-speech >= 0.30.0, < 0.31dev',\n 'google-cloud-storage >= 1.6.0, < 1.7dev',\n 'google-cloud-trace >= 0.17.0, < 0.18dev',\n 'google-cloud-translate >= 1.3.0, < 1.4dev',\n 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',\n 'google-cloud-vision >= 0.29.0, < 0.30dev',\n]\n\nsetup(\n name='google-cloud',\n version='0.33.1',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n\nwarning = \"WARNING: The google-cloud Python package is deprecated. On \" \\\n \"June 18, 2018, this package will no longer install any other \" \\\n \"packages. Please install the product-specific google-cloud-* \" \\\n \"packages needed for your application. See \" \\\n \"https://github.com/GoogleCloudPlatform/google-cloud-python.\"\n\nlogging.warn(warning)\n", "path": "legacy/google-cloud/setup.py"}]}
| 1,739 | 103 |
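Note on the record above: the visible tail of its `setup.py` ends with `logging.warn(warning)`, and `logging.warn` is a deprecated alias for `logging.warning` in the standard library. A minimal sketch of the same deprecation-notice pattern using the supported call; the message text and constant name here are illustrative, not part of the original record:

```python
import logging

# Emit a one-line deprecation notice at import/install time.
# logging.warning is the supported spelling; logging.warn is a deprecated alias.
DEPRECATION_NOTICE = (
    "WARNING: the google-cloud meta-package is deprecated; "
    "install the product-specific google-cloud-* packages instead."
)

logging.warning(DEPRECATION_NOTICE)
```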
gh_patches_debug_30505 | rasdani/github-patches | git_diff | keras-team__autokeras-459 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve code quality using Codacy
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
<!---
A clear and concise description of what the bug is.
-->
We are now using Codacy and CodeClimate to improve our code quality.
Please try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).
On the page, you can see a list of **issues**.
You can click to see the details and suggestions to fix the issue.
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py`
Content:
```
1 # coding: utf-8
2 from torch import nn
3 from torch.nn import functional as F
4
5
6 class Conv1d(nn.Conv1d):
7 """Extended nn.Conv1d for incremental dilated convolutions
8 """
9
10 def __init__(self, *args, **kwargs):
11 super().__init__(*args, **kwargs)
12 self.clear_buffer()
13 self._linearized_weight = None
14
15 def incremental_forward(self, input):
16
17 # reshape weight
18 weight = self._get_linearized_weight()
19 kw = self.kernel_size[0]
20 dilation = self.dilation[0]
21
22 bsz = input.size(0) # input: bsz x len x dim
23 if kw > 1:
24 input = input.data
25 if self.input_buffer is None:
26 self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
27 self.input_buffer.zero_()
28 else:
29 # shift buffer
30 self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
31 # append next input
32 self.input_buffer[:, -1, :] = input[:, -1, :]
33 input = self.input_buffer
34 if dilation > 1:
35 input = input[:, 0::dilation, :].contiguous()
36 output = F.linear(input.view(bsz, -1), weight, self.bias)
37 return output.view(bsz, 1, -1)
38
39 def clear_buffer(self):
40 self.input_buffer = None
41
42 def _get_linearized_weight(self):
43 if self._linearized_weight is None:
44 kw = self.kernel_size[0]
45 # nn.Conv1d
46 weight = self.weight.transpose(1, 2).contiguous()
47
48 assert weight.size() == (self.out_channels, kw, self.in_channels)
49 self._linearized_weight = weight.view(self.out_channels, -1)
50 return self._linearized_weight
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
@@ -12,29 +12,29 @@
self.clear_buffer()
self._linearized_weight = None
- def incremental_forward(self, input):
+ def incremental_forward(self, input_data):
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
dilation = self.dilation[0]
- bsz = input.size(0) # input: bsz x len x dim
+ bsz = input_data.size(0) # conv_input: bsz x len x dim
if kw > 1:
- input = input.data
+ input_data = input_data.data
if self.input_buffer is None:
- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))
self.input_buffer.zero_()
else:
# shift buffer
self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
# append next input
- self.input_buffer[:, -1, :] = input[:, -1, :]
- input = self.input_buffer
+ self.input_buffer[:, -1, :] = input_data[:, -1, :]
+ input_data = self.input_buffer
if dilation > 1:
- input = input[:, 0::dilation, :].contiguous()
- output = F.linear(input.view(bsz, -1), weight, self.bias)
- return output.view(bsz, 1, -1)
+ input_data = input_data[:, 0::dilation, :].contiguous()
+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)
+ return input_data.view(bsz, 1, -1)
def clear_buffer(self):
self.input_buffer = None
|
{"golden_diff": "diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n@@ -12,29 +12,29 @@\n self.clear_buffer()\n self._linearized_weight = None\n \n- def incremental_forward(self, input):\n+ def incremental_forward(self, input_data):\n \n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n \n- bsz = input.size(0) # input: bsz x len x dim\n+ bsz = input_data.size(0) # conv_input: bsz x len x dim\n if kw > 1:\n- input = input.data\n+ input_data = input_data.data\n if self.input_buffer is None:\n- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n- self.input_buffer[:, -1, :] = input[:, -1, :]\n- input = self.input_buffer\n+ self.input_buffer[:, -1, :] = input_data[:, -1, :]\n+ input_data = self.input_buffer\n if dilation > 1:\n- input = input[:, 0::dilation, :].contiguous()\n- output = F.linear(input.view(bsz, -1), weight, self.bias)\n- return output.view(bsz, 1, -1)\n+ input_data = input_data[:, 0::dilation, :].contiguous()\n+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)\n+ return input_data.view(bsz, 1, -1)\n \n def clear_buffer(self):\n self.input_buffer = None\n", "issue": "improve code quality using Codacy\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\nA clear and concise description of what the bug is.\r\n-->\r\nWe are now using Codacy and CodeClimate to improve our code quality.\r\n\r\nPlease try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).\r\nOn the page, you can see a list of **issues**.\r\nYou can click to see the details and suggestions to fix the issue.\r\n\r\nThanks.\n", "before_files": [{"content": "# coding: utf-8\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Conv1d(nn.Conv1d):\n \"\"\"Extended nn.Conv1d for incremental dilated convolutions\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.clear_buffer()\n self._linearized_weight = None\n\n def incremental_forward(self, input):\n\n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n\n bsz = input.size(0) # input: bsz x len x dim\n if kw > 1:\n input = input.data\n if self.input_buffer is None:\n self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n self.input_buffer[:, -1, :] = input[:, -1, :]\n input = self.input_buffer\n if dilation > 1:\n input = input[:, 0::dilation, :].contiguous()\n output = F.linear(input.view(bsz, -1), weight, self.bias)\n return output.view(bsz, 1, -1)\n\n def clear_buffer(self):\n 
self.input_buffer = None\n\n def _get_linearized_weight(self):\n if self._linearized_weight is None:\n kw = self.kernel_size[0]\n # nn.Conv1d\n weight = self.weight.transpose(1, 2).contiguous()\n\n assert weight.size() == (self.out_channels, kw, self.in_channels)\n self._linearized_weight = weight.view(self.out_channels, -1)\n return self._linearized_weight\n", "path": "autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py"}], "after_files": [{"content": "# coding: utf-8\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Conv1d(nn.Conv1d):\n \"\"\"Extended nn.Conv1d for incremental dilated convolutions\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.clear_buffer()\n self._linearized_weight = None\n\n def incremental_forward(self, input_data):\n\n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n\n bsz = input_data.size(0) # conv_input: bsz x len x dim\n if kw > 1:\n input_data = input_data.data\n if self.input_buffer is None:\n self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n self.input_buffer[:, -1, :] = input_data[:, -1, :]\n input_data = self.input_buffer\n if dilation > 1:\n input_data = input_data[:, 0::dilation, :].contiguous()\n input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)\n return input_data.view(bsz, 1, -1)\n\n def clear_buffer(self):\n self.input_buffer = None\n\n def _get_linearized_weight(self):\n if self._linearized_weight is None:\n kw = self.kernel_size[0]\n # nn.Conv1d\n weight = self.weight.transpose(1, 2).contiguous()\n\n assert weight.size() == (self.out_channels, kw, self.in_channels)\n self._linearized_weight = weight.view(self.out_channels, -1)\n return self._linearized_weight\n", "path": "autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py"}]}
| 960 | 526 |
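Note on the record above: the golden diff is a pure rename. The parameter `input` shadows Python's builtin `input()`, which is exactly the kind of finding Codacy surfaces (pylint reports it as `redefined-builtin`, W0622; flake8-builtins as A002). A minimal sketch of the pattern, independent of the autokeras code; function and variable names are illustrative:

```python
# Shadowing a builtin: static analyzers flag the parameter name.
def scale(input):  # noqa: A002 -- `input` hides the builtin input()
    return input * 2

# Same behavior, lint-clean: rename the parameter, keep the logic unchanged.
def scale_clean(input_data):
    return input_data * 2

assert scale(3) == scale_clean(3) == 6
```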
gh_patches_debug_8653 | rasdani/github-patches | git_diff | Health-Informatics-UoN__Carrot-Mapper-732 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scan Report Edit Table
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/proxy/urls.py`
Content:
```
1 from config import settings
2 from django.urls import re_path
3 from revproxy.views import ProxyView
4
5 # A set of urls that will override any root paths requested, and proxy them to the Next.js app.
6 urlpatterns = [
7 # /scanreports/ and escape any further paths
8 re_path(
9 r"^scanreports/(?P<path>(?!create))$",
10 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports"),
11 name="scan-report-list",
12 ),
13 re_path(
14 r"^scanreports/(?P<path>\d+)/?$",
15 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
16 name="scan-report-tables",
17 ),
18 re_path(
19 r"^scanreports/(?P<path>\d+/tables/\d+)/$",
20 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
21 name="scan-report-fields",
22 ),
23 re_path(
24 r"^scanreports/(?P<path>\d+/tables/\d+/fields/\d+)/$",
25 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
26 name="scan-report-values",
27 ),
28 re_path(
29 r"^datasets/(?P<path>(?![\d/]).*)$",
30 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
31 name="datasets-list",
32 ),
33 re_path(
34 r"^datasets/(?P<path>\d+)/?$",
35 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
36 name="datasets-scanreports-list",
37 ),
38 re_path(
39 "_next/(?P<path>.*)$",
40 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/_next"),
41 ),
42 ]
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py
--- a/app/api/proxy/urls.py
+++ b/app/api/proxy/urls.py
@@ -25,6 +25,11 @@
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
name="scan-report-values",
),
+ re_path(
+ r"^scanreports/(?P<path>\d+/tables/\d+/update)/$",
+ ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
+ name="scan-report-edit-table",
+ ),
re_path(
r"^datasets/(?P<path>(?![\d/]).*)$",
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
|
{"golden_diff": "diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py\n--- a/app/api/proxy/urls.py\n+++ b/app/api/proxy/urls.py\n@@ -25,6 +25,11 @@\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n+ re_path(\n+ r\"^scanreports/(?P<path>\\d+/tables/\\d+/update)/$\",\n+ ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n+ name=\"scan-report-edit-table\",\n+ ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n", "issue": "Scan Report Edit Table\n\n", "before_files": [{"content": "from config import settings\nfrom django.urls import re_path\nfrom revproxy.views import ProxyView\n\n# A set of urls that will override any root paths requested, and proxy them to the Next.js app.\nurlpatterns = [\n # /scanreports/ and escape any further paths\n re_path(\n r\"^scanreports/(?P<path>(?!create))$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports\"),\n name=\"scan-report-list\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-tables\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-fields\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+/fields/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-list\",\n ),\n re_path(\n r\"^datasets/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-scanreports-list\",\n ),\n re_path(\n \"_next/(?P<path>.*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/_next\"),\n ),\n]\n", "path": "app/api/proxy/urls.py"}], "after_files": [{"content": "from config import settings\nfrom django.urls import re_path\nfrom revproxy.views import ProxyView\n\n# A set of urls that will override any root paths requested, and proxy them to the Next.js app.\nurlpatterns = [\n # /scanreports/ and escape any further paths\n re_path(\n r\"^scanreports/(?P<path>(?!create))$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports\"),\n name=\"scan-report-list\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-tables\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-fields\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+/fields/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+/update)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-edit-table\",\n ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-list\",\n ),\n re_path(\n r\"^datasets/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-scanreports-list\",\n ),\n re_path(\n \"_next/(?P<path>.*)$\",\n 
ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/_next\"),\n ),\n]\n", "path": "app/api/proxy/urls.py"}]}
| 719 | 176 |
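Note on the record above: the fix adds one more `re_path` proxy route so that `/scanreports/<id>/tables/<id>/update/` is forwarded to the Next.js app instead of falling through to Django's other URL handlers. A quick way to sanity-check the new pattern in isolation; the regex is copied from the diff, the test values are illustrative:

```python
import re

# Pattern added by the golden diff for the edit-table page.
pattern = re.compile(r"^scanreports/(?P<path>\d+/tables/\d+/update)/$")

assert pattern.match("scanreports/12/tables/34/update/")
assert pattern.match("scanreports/12/tables/34/") is None  # handled by the fields route
print(pattern.match("scanreports/12/tables/34/update/").group("path"))
# -> 12/tables/34/update
```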
gh_patches_debug_8969 | rasdani/github-patches | git_diff | aws__serverless-application-model-610 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rendering on PyPI is broken
**Description:**
Rendering of the README on PyPI is broken because it expects RST by default and Markdown is being uploaded.
**Steps to reproduce the issue:**
1. Go to https://pypi.org/project/aws-sam-translator/
**Observed result:**
Raw markdown
**Expected result:**
Links and images working
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # setup.py
4 #
5 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19
20 """
21 AWS SAM Serverless Application Model
22 """
23 import io
24 import os
25 import re
26
27 from setuptools import setup, find_packages
28
29
30 def read(*filenames, **kwargs):
31 encoding = kwargs.get('encoding', 'utf-8')
32 sep = kwargs.get('sep', os.linesep)
33 buf = []
34 for filename in filenames:
35 with io.open(filename, encoding=encoding) as f:
36 buf.append(f.read())
37 return sep.join(buf)
38
39
40 def read_version():
41 content = read(os.path.join(
42 os.path.dirname(__file__), 'samtranslator', '__init__.py'))
43 return re.search(r"__version__ = '([^']+)'", content).group(1)
44
45
46 def read_requirements(req='base.txt'):
47 content = read(os.path.join('requirements', req))
48 return [line for line in content.split(os.linesep)
49 if not line.strip().startswith('#')]
50
51
52 setup(
53 name='aws-sam-translator',
54 version=read_version(),
55 description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
56 long_description=read('README.md'),
57 author='Amazon Web Services',
58 author_email='[email protected]',
59 url='https://github.com/awslabs/serverless-application-model',
60 license='Apache License 2.0',
61 # Exclude all but the code folders
62 packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),
63 install_requires=read_requirements('base.txt'),
64 include_package_data=True,
65 extras_require={
66 'dev': read_requirements('dev.txt')
67 },
68 keywords="AWS SAM Serverless Application Model",
69 classifiers=[
70 'Development Status :: 4 - Beta',
71 'Environment :: Console',
72 'Environment :: Other Environment',
73 'Intended Audience :: Developers',
74 'Intended Audience :: Information Technology',
75 'License :: OSI Approved :: Apache Software License',
76 'Operating System :: OS Independent',
77 'Programming Language :: Python',
78 'Programming Language :: Python :: 2.7',
79 'Topic :: Internet',
80 'Topic :: Software Development :: Build Tools',
81 'Topic :: Utilities'
82 ]
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,7 @@
version=read_version(),
description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
long_description=read('README.md'),
+ long_description_content_type='text/markdown',
author='Amazon Web Services',
author_email='[email protected]',
url='https://github.com/awslabs/serverless-application-model',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,6 +54,7 @@\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n+ long_description_content_type='text/markdown',\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n", "issue": "Rendering on PyPI is broken\n**Description:**\r\nRendering of the README on PyPI is broken because it expects RST by default and Markdown is being uploaded.\r\n\r\n**Steps to reproduce the issue:**\r\n\r\n1. Go to https://pypi.org/project/aws-sam-translator/\r\n\r\n**Observed result:**\r\n\r\nRaw markdown\r\n\r\n**Expected result:**\r\n\r\nLinks and images working\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# setup.py\n#\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAWS SAM Serverless Application Model\n\"\"\"\nimport io\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*filenames, **kwargs):\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', os.linesep)\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as f:\n buf.append(f.read())\n return sep.join(buf)\n\n\ndef read_version():\n content = read(os.path.join(\n os.path.dirname(__file__), 'samtranslator', '__init__.py'))\n return re.search(r\"__version__ = '([^']+)'\", content).group(1)\n\n\ndef read_requirements(req='base.txt'):\n content = read(os.path.join('requirements', req))\n return [line for line in content.split(os.linesep)\n if not line.strip().startswith('#')]\n\n\nsetup(\n name='aws-sam-translator',\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n license='Apache License 2.0',\n # Exclude all but the code folders\n packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),\n install_requires=read_requirements('base.txt'),\n include_package_data=True,\n extras_require={\n 'dev': read_requirements('dev.txt')\n },\n keywords=\"AWS SAM Serverless Application Model\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Utilities'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- 
coding: utf-8 -*-\n#\n# setup.py\n#\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAWS SAM Serverless Application Model\n\"\"\"\nimport io\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*filenames, **kwargs):\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', os.linesep)\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as f:\n buf.append(f.read())\n return sep.join(buf)\n\n\ndef read_version():\n content = read(os.path.join(\n os.path.dirname(__file__), 'samtranslator', '__init__.py'))\n return re.search(r\"__version__ = '([^']+)'\", content).group(1)\n\n\ndef read_requirements(req='base.txt'):\n content = read(os.path.join('requirements', req))\n return [line for line in content.split(os.linesep)\n if not line.strip().startswith('#')]\n\n\nsetup(\n name='aws-sam-translator',\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n license='Apache License 2.0',\n # Exclude all but the code folders\n packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),\n install_requires=read_requirements('base.txt'),\n include_package_data=True,\n extras_require={\n 'dev': read_requirements('dev.txt')\n },\n keywords=\"AWS SAM Serverless Application Model\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Utilities'\n ]\n)\n", "path": "setup.py"}]}
| 1,106 | 111 |
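Note on the record above: the one-line fix works because PyPI renders `long_description` as reStructuredText unless `long_description_content_type` says otherwise, so a Markdown README shows up as raw text. A minimal sketch of the relevant `setup()` fragment; the package metadata here is illustrative, not the SAM project's:

```python
from setuptools import setup

with open("README.md", encoding="utf-8") as f:
    readme = f.read()

setup(
    name="example-package",
    version="0.1.0",
    # Tell PyPI how to render the README; without the content type,
    # PyPI assumes reStructuredText and Markdown renders as raw text.
    long_description=readme,
    long_description_content_type="text/markdown",
)
```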