problem_id stringlengths 18–22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13–58 | prompt stringlengths 1.1k–10.2k | golden_diff stringlengths 151–4.94k | verification_info stringlengths 582–21k | num_tokens int64 271–2.05k | num_tokens_diff int64 47–1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_38607 | rasdani/github-patches | git_diff | liqd__a4-opin-688 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spatial Idea Challenge: No comments possible
In the blueprint Spatial Idea Challenge, in phase 1 comments cannot be made (be sure to test with normal user account). We need to fix the blueprint, apparently. See here https://opin-stage.liqd.net/de/maps/test-the-comments/ for an example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/maps/views.py`
Content:
```
1 from django.conf import settings
2 from easy_thumbnails.files import get_thumbnailer
3
4 from euth.ideas import views as idea_views
5
6 from . import forms
7 from .models import MapIdea
8
9
10 class MapIdeaListView(idea_views.IdeaListView):
11 model = MapIdea
12
13 def dump_geojson(self):
14 result = {}
15 result['type'] = 'FeatureCollection'
16 feature_list = []
17
18 for item in self.get_queryset():
19
20 url = ''
21
22 if item.image:
23 image = get_thumbnailer(item.image)['map_thumbnail']
24 url = image.url
25
26 properties = {
27 'name': item.name,
28 'slug': item.slug,
29 'image': url,
30 'comments_count': item.comment_count,
31 'positive_rating_count': item.positive_rating_count,
32 'negative_rating_count': item.negative_rating_count,
33 'url': item.get_absolute_url()
34 }
35 point_dict = item.point
36 point_dict['properties'] = properties
37 feature_list.append(point_dict)
38
39 result['features'] = feature_list
40 return result
41
42 def get_context_data(self, **kwargs):
43 context = super().get_context_data(**kwargs)
44 context['mapideas_json'] = self.dump_geojson()
45 context['map_url'] = settings.BASE_MAP
46 context['polygon'] = self.module.settings_instance.polygon
47 return context
48
49
50 class MapIdeaCreateView(idea_views.IdeaCreateView):
51 model = MapIdea
52 form_class = forms.MapIdeaForm
53 permission_required = 'euth_maps.propose_idea'
54
55 def get_form_kwargs(self):
56 kwargs = super().get_form_kwargs()
57 kwargs['settings_instance'] = self.module.settings_instance
58 return kwargs
59
60
61 class MapIdeaUpdateView(idea_views.IdeaUpdateView):
62 model = MapIdea
63 form_class = forms.MapIdeaForm
64
65 def get_form_kwargs(self):
66 kwargs = super().get_form_kwargs()
67 kwargs['settings_instance'] = self.object.module.settings_instance
68 return kwargs
69
70
71 class MapIdeaDeleteView(idea_views.IdeaDeleteView):
72 model = MapIdea
73
74
75 class MapIdeaDetailView(idea_views.IdeaDetailView):
76 model = MapIdea
77 queryset = MapIdea.objects.annotate_positive_rating_count()\
78 .annotate_negative_rating_count()
79
80 def get_context_data(self, **kwargs):
81 context = super().get_context_data(**kwargs)
82 context['map_url'] = settings.BASE_MAP
83 return context
84
```
Path: `euth/maps/rules.py`
Content:
```
1 import rules
2 from rules.predicates import is_superuser
3
4 from adhocracy4.modules.predicates import (is_context_initiator,
5 is_context_member,
6 is_context_moderator)
7 from adhocracy4.phases.predicates import phase_allows_create
8
9 from .models import MapIdea
10
11 rules.add_perm('euth_maps.propose_idea',
12 is_superuser | is_context_moderator | is_context_initiator |
13 (is_context_member & phase_allows_create(MapIdea)))
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/maps/rules.py b/euth/maps/rules.py
--- a/euth/maps/rules.py
+++ b/euth/maps/rules.py
@@ -3,11 +3,36 @@
from adhocracy4.modules.predicates import (is_context_initiator,
is_context_member,
- is_context_moderator)
-from adhocracy4.phases.predicates import phase_allows_create
-
+ is_context_moderator,
+ is_owner,
+ is_public_context)
+from adhocracy4.phases.predicates import (phase_allows_comment,
+ phase_allows_create,
+ phase_allows_modify,
+ phase_allows_rate)
from .models import MapIdea
-rules.add_perm('euth_maps.propose_idea',
+
+rules.add_perm('euth_maps.rate_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & phase_allows_rate))
+
+
+rules.add_perm('euth_maps.comment_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & phase_allows_comment))
+
+
+rules.add_perm('euth_maps.modify_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & is_owner & phase_allows_modify))
+
+
+rules.add_perm('euth_maps.propose_mapidea',
is_superuser | is_context_moderator | is_context_initiator |
(is_context_member & phase_allows_create(MapIdea)))
+
+
+rules.add_perm('euth_maps.view_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ is_context_member | is_public_context)
diff --git a/euth/maps/views.py b/euth/maps/views.py
--- a/euth/maps/views.py
+++ b/euth/maps/views.py
@@ -50,7 +50,7 @@
class MapIdeaCreateView(idea_views.IdeaCreateView):
model = MapIdea
form_class = forms.MapIdeaForm
- permission_required = 'euth_maps.propose_idea'
+ permission_required = 'euth_maps.propose_mapidea'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
@@ -60,6 +60,7 @@
class MapIdeaUpdateView(idea_views.IdeaUpdateView):
model = MapIdea
+ permission_required = 'euth_maps.modify_mapidea'
form_class = forms.MapIdeaForm
def get_form_kwargs(self):
@@ -70,10 +71,12 @@
class MapIdeaDeleteView(idea_views.IdeaDeleteView):
model = MapIdea
+ permission_required = 'euth_maps.modify_mapidea'
class MapIdeaDetailView(idea_views.IdeaDetailView):
model = MapIdea
+ permission_required = 'euth_maps.view_mapidea'
queryset = MapIdea.objects.annotate_positive_rating_count()\
.annotate_negative_rating_count()
|
{"golden_diff": "diff --git a/euth/maps/rules.py b/euth/maps/rules.py\n--- a/euth/maps/rules.py\n+++ b/euth/maps/rules.py\n@@ -3,11 +3,36 @@\n \n from adhocracy4.modules.predicates import (is_context_initiator,\n is_context_member,\n- is_context_moderator)\n-from adhocracy4.phases.predicates import phase_allows_create\n-\n+ is_context_moderator,\n+ is_owner,\n+ is_public_context)\n+from adhocracy4.phases.predicates import (phase_allows_comment,\n+ phase_allows_create,\n+ phase_allows_modify,\n+ phase_allows_rate)\n from .models import MapIdea\n \n-rules.add_perm('euth_maps.propose_idea',\n+\n+rules.add_perm('euth_maps.rate_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & phase_allows_rate))\n+\n+\n+rules.add_perm('euth_maps.comment_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & phase_allows_comment))\n+\n+\n+rules.add_perm('euth_maps.modify_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & is_owner & phase_allows_modify))\n+\n+\n+rules.add_perm('euth_maps.propose_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_create(MapIdea)))\n+\n+\n+rules.add_perm('euth_maps.view_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ is_context_member | is_public_context)\ndiff --git a/euth/maps/views.py b/euth/maps/views.py\n--- a/euth/maps/views.py\n+++ b/euth/maps/views.py\n@@ -50,7 +50,7 @@\n class MapIdeaCreateView(idea_views.IdeaCreateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n- permission_required = 'euth_maps.propose_idea'\n+ permission_required = 'euth_maps.propose_mapidea'\n \n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n@@ -60,6 +60,7 @@\n \n class MapIdeaUpdateView(idea_views.IdeaUpdateView):\n model = MapIdea\n+ permission_required = 'euth_maps.modify_mapidea'\n form_class = forms.MapIdeaForm\n \n def get_form_kwargs(self):\n@@ -70,10 +71,12 @@\n \n class MapIdeaDeleteView(idea_views.IdeaDeleteView):\n model = MapIdea\n+ permission_required = 'euth_maps.modify_mapidea'\n \n \n class MapIdeaDetailView(idea_views.IdeaDetailView):\n model = MapIdea\n+ permission_required = 'euth_maps.view_mapidea'\n queryset = MapIdea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n", "issue": "Spatial Idea Challenge: No comments possible\nIn the blueprint Spatial Idea Challenge, in phase 1 comments cannot be made (be sure to test with normal user account). We need to fix the blueprint, apparently. See here https://opin-stage.liqd.net/de/maps/test-the-comments/ for an example.\n", "before_files": [{"content": "from django.conf import settings\nfrom easy_thumbnails.files import get_thumbnailer\n\nfrom euth.ideas import views as idea_views\n\nfrom . 
import forms\nfrom .models import MapIdea\n\n\nclass MapIdeaListView(idea_views.IdeaListView):\n model = MapIdea\n\n def dump_geojson(self):\n result = {}\n result['type'] = 'FeatureCollection'\n feature_list = []\n\n for item in self.get_queryset():\n\n url = ''\n\n if item.image:\n image = get_thumbnailer(item.image)['map_thumbnail']\n url = image.url\n\n properties = {\n 'name': item.name,\n 'slug': item.slug,\n 'image': url,\n 'comments_count': item.comment_count,\n 'positive_rating_count': item.positive_rating_count,\n 'negative_rating_count': item.negative_rating_count,\n 'url': item.get_absolute_url()\n }\n point_dict = item.point\n point_dict['properties'] = properties\n feature_list.append(point_dict)\n\n result['features'] = feature_list\n return result\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mapideas_json'] = self.dump_geojson()\n context['map_url'] = settings.BASE_MAP\n context['polygon'] = self.module.settings_instance.polygon\n return context\n\n\nclass MapIdeaCreateView(idea_views.IdeaCreateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n permission_required = 'euth_maps.propose_idea'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.module.settings_instance\n return kwargs\n\n\nclass MapIdeaUpdateView(idea_views.IdeaUpdateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.object.module.settings_instance\n return kwargs\n\n\nclass MapIdeaDeleteView(idea_views.IdeaDeleteView):\n model = MapIdea\n\n\nclass MapIdeaDetailView(idea_views.IdeaDetailView):\n model = MapIdea\n queryset = MapIdea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['map_url'] = settings.BASE_MAP\n return context\n", "path": "euth/maps/views.py"}, {"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules.predicates import (is_context_initiator,\n is_context_member,\n is_context_moderator)\nfrom adhocracy4.phases.predicates import phase_allows_create\n\nfrom .models import MapIdea\n\nrules.add_perm('euth_maps.propose_idea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_create(MapIdea)))\n", "path": "euth/maps/rules.py"}], "after_files": [{"content": "from django.conf import settings\nfrom easy_thumbnails.files import get_thumbnailer\n\nfrom euth.ideas import views as idea_views\n\nfrom . 
import forms\nfrom .models import MapIdea\n\n\nclass MapIdeaListView(idea_views.IdeaListView):\n model = MapIdea\n\n def dump_geojson(self):\n result = {}\n result['type'] = 'FeatureCollection'\n feature_list = []\n\n for item in self.get_queryset():\n\n url = ''\n\n if item.image:\n image = get_thumbnailer(item.image)['map_thumbnail']\n url = image.url\n\n properties = {\n 'name': item.name,\n 'slug': item.slug,\n 'image': url,\n 'comments_count': item.comment_count,\n 'positive_rating_count': item.positive_rating_count,\n 'negative_rating_count': item.negative_rating_count,\n 'url': item.get_absolute_url()\n }\n point_dict = item.point\n point_dict['properties'] = properties\n feature_list.append(point_dict)\n\n result['features'] = feature_list\n return result\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mapideas_json'] = self.dump_geojson()\n context['map_url'] = settings.BASE_MAP\n context['polygon'] = self.module.settings_instance.polygon\n return context\n\n\nclass MapIdeaCreateView(idea_views.IdeaCreateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n permission_required = 'euth_maps.propose_mapidea'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.module.settings_instance\n return kwargs\n\n\nclass MapIdeaUpdateView(idea_views.IdeaUpdateView):\n model = MapIdea\n permission_required = 'euth_maps.modify_mapidea'\n form_class = forms.MapIdeaForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.object.module.settings_instance\n return kwargs\n\n\nclass MapIdeaDeleteView(idea_views.IdeaDeleteView):\n model = MapIdea\n permission_required = 'euth_maps.modify_mapidea'\n\n\nclass MapIdeaDetailView(idea_views.IdeaDetailView):\n model = MapIdea\n permission_required = 'euth_maps.view_mapidea'\n queryset = MapIdea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['map_url'] = settings.BASE_MAP\n return context\n", "path": "euth/maps/views.py"}, {"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules.predicates import (is_context_initiator,\n is_context_member,\n is_context_moderator,\n is_owner,\n is_public_context)\nfrom adhocracy4.phases.predicates import (phase_allows_comment,\n phase_allows_create,\n phase_allows_modify,\n phase_allows_rate)\nfrom .models import MapIdea\n\n\nrules.add_perm('euth_maps.rate_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_rate))\n\n\nrules.add_perm('euth_maps.comment_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_comment))\n\n\nrules.add_perm('euth_maps.modify_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & is_owner & phase_allows_modify))\n\n\nrules.add_perm('euth_maps.propose_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_create(MapIdea)))\n\n\nrules.add_perm('euth_maps.view_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n is_context_member | is_public_context)\n", "path": "euth/maps/rules.py"}]}
| 1,164 | 665 |
gh_patches_debug_26237 | rasdani/github-patches | git_diff | netket__netket-212 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get rid of netket::Ising and other hard-coded hamiltonians in c++
In the spirit of #199 , we can safely remove the Ising and Heisenberg hamiltonians from the C++ classes, since those are nothing but specific cases of more general hamiltonians (basically they are just LocalOperators or even GraphOperator).
Convenient constructors can be defined in few lines of python, and moved to the python part of the library.
The BoseHubbard hamiltonian is a bit trickier, we can keep it as it is for the moment.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netket/operator.py`
Content:
```
1 from ._C_netket.operator import *
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netket/operator.py b/netket/operator.py
--- a/netket/operator.py
+++ b/netket/operator.py
@@ -1 +1,52 @@
from ._C_netket.operator import *
+import numpy as _np
+
+
+def Ising(hilbert, h, J=1.0):
+ """
+ Constructs a new ``Ising`` given a hilbert space, a transverse field,
+ and (if specified) a coupling constant.
+
+ Args:
+ hilbert: Hilbert space the operator acts on.
+ h: The strength of the transverse field.
+ J: The strength of the coupling. Default is 1.0.
+
+ Examples:
+ Constructs an ``Ising`` operator for a 1D system.
+
+ ```python
+ >>> import netket as nk
+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
+ >>> hi = nk.hilbert.Spin(s=0.5, graph=g)
+ >>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5)
+ >>> print(op.hilbert.size)
+ 20
+ """
+ sigma_x = _np.array([[0, 1], [1, 0]])
+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+ return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])
+
+
+def Heisenberg(hilbert):
+ """
+ Constructs a new ``Heisenberg`` given a hilbert space.
+ Args:
+ hilbert: Hilbert space the operator acts on.
+ Examples:
+ Constructs a ``Heisenberg`` operator for a 1D system.
+ ```python
+ >>> import netket as nk
+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
+ >>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g)
+ >>> op = nk.operator.Heisenberg(hilbert=hi)
+ >>> print(op.hilbert.size)
+ 20
+ """
+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+ exchange = _np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])
+ if hilbert.graph.is_bipartite:
+ heis_term = sz_sz - exchange
+ else:
+ heis_term = sz_sz + exchange
+ return GraphOperator(hilbert, bondops=[heis_term])
|
{"golden_diff": "diff --git a/netket/operator.py b/netket/operator.py\n--- a/netket/operator.py\n+++ b/netket/operator.py\n@@ -1 +1,52 @@\n from ._C_netket.operator import *\n+import numpy as _np\n+\n+\n+def Ising(hilbert, h, J=1.0):\n+ \"\"\"\n+ Constructs a new ``Ising`` given a hilbert space, a transverse field,\n+ and (if specified) a coupling constant.\n+\n+ Args:\n+ hilbert: Hilbert space the operator acts on.\n+ h: The strength of the transverse field.\n+ J: The strength of the coupling. Default is 1.0.\n+\n+ Examples:\n+ Constructs an ``Ising`` operator for a 1D system.\n+\n+ ```python\n+ >>> import netket as nk\n+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n+ >>> hi = nk.hilbert.Spin(s=0.5, graph=g)\n+ >>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5)\n+ >>> print(op.hilbert.size)\n+ 20\n+ \"\"\"\n+ sigma_x = _np.array([[0, 1], [1, 0]])\n+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n+ return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])\n+\n+\n+def Heisenberg(hilbert):\n+ \"\"\"\n+ Constructs a new ``Heisenberg`` given a hilbert space.\n+ Args:\n+ hilbert: Hilbert space the operator acts on.\n+ Examples:\n+ Constructs a ``Heisenberg`` operator for a 1D system.\n+ ```python\n+ >>> import netket as nk\n+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n+ >>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g)\n+ >>> op = nk.operator.Heisenberg(hilbert=hi)\n+ >>> print(op.hilbert.size)\n+ 20\n+ \"\"\"\n+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n+ exchange = _np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])\n+ if hilbert.graph.is_bipartite:\n+ heis_term = sz_sz - exchange\n+ else:\n+ heis_term = sz_sz + exchange\n+ return GraphOperator(hilbert, bondops=[heis_term])\n", "issue": "Get rid of netket::Ising and other hard-coded hamiltonians in c++\nIn the spirit of #199 , we can safely remove the Ising and Heisenberg hamiltonians from the C++ classes, since those are nothing but specific cases of more general hamiltonians (basically they are just LocalOperators or even GraphOperator). \r\nConvenient constructors can be defined in few lines of python, and moved to the python part of the library. \r\n\r\nThe BoseHubbard hamiltonian is a bit trickier, we can keep it as it is for the moment. \n", "before_files": [{"content": "from ._C_netket.operator import *\n", "path": "netket/operator.py"}], "after_files": [{"content": "from ._C_netket.operator import *\nimport numpy as _np\n\n\ndef Ising(hilbert, h, J=1.0):\n \"\"\"\n Constructs a new ``Ising`` given a hilbert space, a transverse field,\n and (if specified) a coupling constant.\n\n Args:\n hilbert: Hilbert space the operator acts on.\n h: The strength of the transverse field.\n J: The strength of the coupling. 
Default is 1.0.\n\n Examples:\n Constructs an ``Ising`` operator for a 1D system.\n\n ```python\n >>> import netket as nk\n >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n >>> hi = nk.hilbert.Spin(s=0.5, graph=g)\n >>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5)\n >>> print(op.hilbert.size)\n 20\n \"\"\"\n sigma_x = _np.array([[0, 1], [1, 0]])\n sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])\n\n\ndef Heisenberg(hilbert):\n \"\"\"\n Constructs a new ``Heisenberg`` given a hilbert space.\n Args:\n hilbert: Hilbert space the operator acts on.\n Examples:\n Constructs a ``Heisenberg`` operator for a 1D system.\n ```python\n >>> import netket as nk\n >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n >>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g)\n >>> op = nk.operator.Heisenberg(hilbert=hi)\n >>> print(op.hilbert.size)\n 20\n \"\"\"\n sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n exchange = _np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])\n if hilbert.graph.is_bipartite:\n heis_term = sz_sz - exchange\n else:\n heis_term = sz_sz + exchange\n return GraphOperator(hilbert, bondops=[heis_term])\n", "path": "netket/operator.py"}]}
| 390 | 711 |
gh_patches_debug_37010 | rasdani/github-patches | git_diff | mirumee__ariadne-565 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Federated schemas should not require at least one query to be implemented
In a Federated environment, the Gateway instantiates the Query type by default. This means that an implementing services should _not_ be required to implement or extend a query.
# Ideal Scenario
This is an example scenario of what is valid in Node and Java implementations. For example, it should be valid to expose a service that exposes no root queries, but only the federated query fields, like below.
Produced Query type:
**Example**: This is what the schemas would look like for two federated services:
## Product Service
product/schema.gql
```gql
extend type Query {
products: [Product]
}
type Product {
id: ID!
name: String
reviews: [ProductReview]
}
extend type ProductReview @key(fields: "id") {
id: ID! @external
}
```
**Output**:
```
products: [Product]
_entities(representations: [_Any]): [_Entity]
_service: _Service
```
## Review Service
review/schema.gql
```gql
# Notice how we don't have to extend the Query type
type ProductReview @key(fields: "id") {
id: ID!
comment: String!
}
```
**Output**:
This should be valid.
```
_entities(representations: [_Any]): [_Entity]
_service: _Service
```
# Breaking Scenario
When attempting to implement the `ProductReview` service (see example above) without extending the Query type, Ariadne will fail to [generate a federated schema](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L57). This is because `make_executable_schema` attempts to generate a federated schema by [extending a Query type](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L24) with the assumption that a Query type has been defined, which technically it isn't.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/contrib/federation/schema.py`
Content:
```
1 from typing import Dict, List, Type, Union, cast
2
3 from graphql import extend_schema, parse
4 from graphql.language import DocumentNode
5 from graphql.type import (
6 GraphQLObjectType,
7 GraphQLSchema,
8 GraphQLUnionType,
9 )
10
11 from ...executable_schema import make_executable_schema, join_type_defs
12 from ...schema_visitor import SchemaDirectiveVisitor
13 from ...types import SchemaBindable
14 from .utils import get_entity_types, purge_schema_directives, resolve_entities
15
16
17 federation_service_type_defs = """
18 scalar _Any
19
20 type _Service {
21 sdl: String
22 }
23
24 extend type Query {
25 _service: _Service!
26 }
27
28 directive @external on FIELD_DEFINITION
29 directive @requires(fields: String!) on FIELD_DEFINITION
30 directive @provides(fields: String!) on FIELD_DEFINITION
31 directive @key(fields: String!) repeatable on OBJECT | INTERFACE
32 directive @extends on OBJECT | INTERFACE
33 """
34
35 federation_entity_type_defs = """
36 union _Entity
37
38 extend type Query {
39 _entities(representations: [_Any!]!): [_Entity]!
40 }
41 """
42
43
44 def make_federated_schema(
45 type_defs: Union[str, List[str]],
46 *bindables: Union[SchemaBindable, List[SchemaBindable]],
47 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
48 ) -> GraphQLSchema:
49 if isinstance(type_defs, list):
50 type_defs = join_type_defs(type_defs)
51
52 # Remove custom schema directives (to avoid apollo-gateway crashes).
53 # NOTE: This does NOT interfere with ariadne's directives support.
54 sdl = purge_schema_directives(type_defs)
55
56 type_defs = join_type_defs([type_defs, federation_service_type_defs])
57 schema = make_executable_schema(
58 type_defs,
59 *bindables,
60 directives=directives,
61 )
62
63 # Parse through the schema to find all entities with key directive.
64 entity_types = get_entity_types(schema)
65 has_entities = len(entity_types) > 0
66
67 # Add the federation type definitions.
68 if has_entities:
69 schema = extend_federated_schema(
70 schema,
71 parse(federation_entity_type_defs),
72 )
73
74 # Add _entities query.
75 entity_type = schema.get_type("_Entity")
76 if entity_type:
77 entity_type = cast(GraphQLUnionType, entity_type)
78 entity_type.types = entity_types
79
80 query_type = schema.get_type("Query")
81 if query_type:
82 query_type = cast(GraphQLObjectType, query_type)
83 query_type.fields["_entities"].resolve = resolve_entities
84
85 # Add _service query.
86 query_type = schema.get_type("Query")
87 if query_type:
88 query_type = cast(GraphQLObjectType, query_type)
89 query_type.fields["_service"].resolve = lambda _service, info: {"sdl": sdl}
90
91 return schema
92
93
94 def extend_federated_schema(
95 schema: GraphQLSchema,
96 document_ast: DocumentNode,
97 assume_valid: bool = False,
98 assume_valid_sdl: bool = False,
99 ) -> GraphQLSchema:
100 extended_schema = extend_schema(
101 schema,
102 document_ast,
103 assume_valid,
104 assume_valid_sdl,
105 )
106
107 for (k, v) in schema.type_map.items():
108 resolve_reference = getattr(v, "__resolve_reference__", None)
109 if resolve_reference and k in extended_schema.type_map:
110 setattr(
111 extended_schema.type_map[k],
112 "__resolve_reference__",
113 resolve_reference,
114 )
115
116 return extended_schema
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ariadne/contrib/federation/schema.py b/ariadne/contrib/federation/schema.py
--- a/ariadne/contrib/federation/schema.py
+++ b/ariadne/contrib/federation/schema.py
@@ -2,6 +2,7 @@
from graphql import extend_schema, parse
from graphql.language import DocumentNode
+from graphql.language.ast import ObjectTypeDefinitionNode
from graphql.type import (
GraphQLObjectType,
GraphQLSchema,
@@ -17,13 +18,13 @@
federation_service_type_defs = """
scalar _Any
- type _Service {
+ type _Service {{
sdl: String
- }
+ }}
- extend type Query {
+ {type_token} Query {{
_service: _Service!
- }
+ }}
directive @external on FIELD_DEFINITION
directive @requires(fields: String!) on FIELD_DEFINITION
@@ -41,6 +42,17 @@
"""
+def has_query_type(type_defs: str) -> bool:
+ ast_document = parse(type_defs)
+ for definition in ast_document.definitions:
+ if (
+ isinstance(definition, ObjectTypeDefinitionNode)
+ and definition.name.value == "Query"
+ ):
+ return True
+ return False
+
+
def make_federated_schema(
type_defs: Union[str, List[str]],
*bindables: Union[SchemaBindable, List[SchemaBindable]],
@@ -52,8 +64,10 @@
# Remove custom schema directives (to avoid apollo-gateway crashes).
# NOTE: This does NOT interfere with ariadne's directives support.
sdl = purge_schema_directives(type_defs)
+ type_token = "extend type" if has_query_type(sdl) else "type"
+ federation_service_type = federation_service_type_defs.format(type_token=type_token)
- type_defs = join_type_defs([type_defs, federation_service_type_defs])
+ type_defs = join_type_defs([type_defs, federation_service_type])
schema = make_executable_schema(
type_defs,
*bindables,
@@ -66,10 +80,7 @@
# Add the federation type definitions.
if has_entities:
- schema = extend_federated_schema(
- schema,
- parse(federation_entity_type_defs),
- )
+ schema = extend_federated_schema(schema, parse(federation_entity_type_defs))
# Add _entities query.
entity_type = schema.get_type("_Entity")
|
{"golden_diff": "diff --git a/ariadne/contrib/federation/schema.py b/ariadne/contrib/federation/schema.py\n--- a/ariadne/contrib/federation/schema.py\n+++ b/ariadne/contrib/federation/schema.py\n@@ -2,6 +2,7 @@\n \n from graphql import extend_schema, parse\n from graphql.language import DocumentNode\n+from graphql.language.ast import ObjectTypeDefinitionNode\n from graphql.type import (\n GraphQLObjectType,\n GraphQLSchema,\n@@ -17,13 +18,13 @@\n federation_service_type_defs = \"\"\"\n scalar _Any\n \n- type _Service {\n+ type _Service {{\n sdl: String\n- }\n+ }}\n \n- extend type Query {\n+ {type_token} Query {{\n _service: _Service!\n- }\n+ }}\n \n directive @external on FIELD_DEFINITION\n directive @requires(fields: String!) on FIELD_DEFINITION\n@@ -41,6 +42,17 @@\n \"\"\"\n \n \n+def has_query_type(type_defs: str) -> bool:\n+ ast_document = parse(type_defs)\n+ for definition in ast_document.definitions:\n+ if (\n+ isinstance(definition, ObjectTypeDefinitionNode)\n+ and definition.name.value == \"Query\"\n+ ):\n+ return True\n+ return False\n+\n+\n def make_federated_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n@@ -52,8 +64,10 @@\n # Remove custom schema directives (to avoid apollo-gateway crashes).\n # NOTE: This does NOT interfere with ariadne's directives support.\n sdl = purge_schema_directives(type_defs)\n+ type_token = \"extend type\" if has_query_type(sdl) else \"type\"\n+ federation_service_type = federation_service_type_defs.format(type_token=type_token)\n \n- type_defs = join_type_defs([type_defs, federation_service_type_defs])\n+ type_defs = join_type_defs([type_defs, federation_service_type])\n schema = make_executable_schema(\n type_defs,\n *bindables,\n@@ -66,10 +80,7 @@\n \n # Add the federation type definitions.\n if has_entities:\n- schema = extend_federated_schema(\n- schema,\n- parse(federation_entity_type_defs),\n- )\n+ schema = extend_federated_schema(schema, parse(federation_entity_type_defs))\n \n # Add _entities query.\n entity_type = schema.get_type(\"_Entity\")\n", "issue": "Federated schemas should not require at least one query to be implemented\nIn a Federated environment, the Gateway instantiates the Query type by default. This means that an implementing services should _not_ be required to implement or extend a query. \r\n\r\n# Ideal Scenario\r\nThis is an example scenario of what is valid in Node and Java implementations. For example, it should be valid to expose a service that exposes no root queries, but only the federated query fields, like below.\r\n\r\nProduced Query type:\r\n\r\n\r\n**Example**: This is what the schemas would look like for two federated services:\r\n## Product Service\r\nproduct/schema.gql\r\n```gql\r\nextend type Query {\r\n products: [Product]\r\n}\r\n\r\ntype Product {\r\n id: ID!\r\n name: String\r\n reviews: [ProductReview]\r\n}\r\n\r\nextend type ProductReview @key(fields: \"id\") {\r\n id: ID! 
@external\r\n}\r\n```\r\n**Output**:\r\n\r\n```\r\nproducts: [Product]\r\n_entities(representations: [_Any]): [_Entity]\r\n_service: _Service\r\n```\r\n\r\n## Review Service\r\nreview/schema.gql\r\n```gql\r\n# Notice how we don't have to extend the Query type\r\ntype ProductReview @key(fields: \"id\") {\r\n id: ID!\r\n comment: String!\r\n}\r\n```\r\n\r\n**Output**:\r\nThis should be valid.\r\n```\r\n_entities(representations: [_Any]): [_Entity]\r\n_service: _Service\r\n```\r\n\r\n\r\n# Breaking Scenario\r\nWhen attempting to implement the `ProductReview` service (see example above) without extending the Query type, Ariadne will fail to [generate a federated schema](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L57). This is because `make_executable_schema` attempts to generate a federated schema by [extending a Query type](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L24) with the assumption that a Query type has been defined, which technically it isn't. \r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List, Type, Union, cast\n\nfrom graphql import extend_schema, parse\nfrom graphql.language import DocumentNode\nfrom graphql.type import (\n GraphQLObjectType,\n GraphQLSchema,\n GraphQLUnionType,\n)\n\nfrom ...executable_schema import make_executable_schema, join_type_defs\nfrom ...schema_visitor import SchemaDirectiveVisitor\nfrom ...types import SchemaBindable\nfrom .utils import get_entity_types, purge_schema_directives, resolve_entities\n\n\nfederation_service_type_defs = \"\"\"\n scalar _Any\n\n type _Service {\n sdl: String\n }\n\n extend type Query {\n _service: _Service!\n }\n\n directive @external on FIELD_DEFINITION\n directive @requires(fields: String!) on FIELD_DEFINITION\n directive @provides(fields: String!) on FIELD_DEFINITION\n directive @key(fields: String!) 
repeatable on OBJECT | INTERFACE\n directive @extends on OBJECT | INTERFACE\n\"\"\"\n\nfederation_entity_type_defs = \"\"\"\n union _Entity\n\n extend type Query {\n _entities(representations: [_Any!]!): [_Entity]!\n }\n\"\"\"\n\n\ndef make_federated_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n # Remove custom schema directives (to avoid apollo-gateway crashes).\n # NOTE: This does NOT interfere with ariadne's directives support.\n sdl = purge_schema_directives(type_defs)\n\n type_defs = join_type_defs([type_defs, federation_service_type_defs])\n schema = make_executable_schema(\n type_defs,\n *bindables,\n directives=directives,\n )\n\n # Parse through the schema to find all entities with key directive.\n entity_types = get_entity_types(schema)\n has_entities = len(entity_types) > 0\n\n # Add the federation type definitions.\n if has_entities:\n schema = extend_federated_schema(\n schema,\n parse(federation_entity_type_defs),\n )\n\n # Add _entities query.\n entity_type = schema.get_type(\"_Entity\")\n if entity_type:\n entity_type = cast(GraphQLUnionType, entity_type)\n entity_type.types = entity_types\n\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_entities\"].resolve = resolve_entities\n\n # Add _service query.\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_service\"].resolve = lambda _service, info: {\"sdl\": sdl}\n\n return schema\n\n\ndef extend_federated_schema(\n schema: GraphQLSchema,\n document_ast: DocumentNode,\n assume_valid: bool = False,\n assume_valid_sdl: bool = False,\n) -> GraphQLSchema:\n extended_schema = extend_schema(\n schema,\n document_ast,\n assume_valid,\n assume_valid_sdl,\n )\n\n for (k, v) in schema.type_map.items():\n resolve_reference = getattr(v, \"__resolve_reference__\", None)\n if resolve_reference and k in extended_schema.type_map:\n setattr(\n extended_schema.type_map[k],\n \"__resolve_reference__\",\n resolve_reference,\n )\n\n return extended_schema\n", "path": "ariadne/contrib/federation/schema.py"}], "after_files": [{"content": "from typing import Dict, List, Type, Union, cast\n\nfrom graphql import extend_schema, parse\nfrom graphql.language import DocumentNode\nfrom graphql.language.ast import ObjectTypeDefinitionNode\nfrom graphql.type import (\n GraphQLObjectType,\n GraphQLSchema,\n GraphQLUnionType,\n)\n\nfrom ...executable_schema import make_executable_schema, join_type_defs\nfrom ...schema_visitor import SchemaDirectiveVisitor\nfrom ...types import SchemaBindable\nfrom .utils import get_entity_types, purge_schema_directives, resolve_entities\n\n\nfederation_service_type_defs = \"\"\"\n scalar _Any\n\n type _Service {{\n sdl: String\n }}\n\n {type_token} Query {{\n _service: _Service!\n }}\n\n directive @external on FIELD_DEFINITION\n directive @requires(fields: String!) on FIELD_DEFINITION\n directive @provides(fields: String!) on FIELD_DEFINITION\n directive @key(fields: String!) 
repeatable on OBJECT | INTERFACE\n directive @extends on OBJECT | INTERFACE\n\"\"\"\n\nfederation_entity_type_defs = \"\"\"\n union _Entity\n\n extend type Query {\n _entities(representations: [_Any!]!): [_Entity]!\n }\n\"\"\"\n\n\ndef has_query_type(type_defs: str) -> bool:\n ast_document = parse(type_defs)\n for definition in ast_document.definitions:\n if (\n isinstance(definition, ObjectTypeDefinitionNode)\n and definition.name.value == \"Query\"\n ):\n return True\n return False\n\n\ndef make_federated_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n # Remove custom schema directives (to avoid apollo-gateway crashes).\n # NOTE: This does NOT interfere with ariadne's directives support.\n sdl = purge_schema_directives(type_defs)\n type_token = \"extend type\" if has_query_type(sdl) else \"type\"\n federation_service_type = federation_service_type_defs.format(type_token=type_token)\n\n type_defs = join_type_defs([type_defs, federation_service_type])\n schema = make_executable_schema(\n type_defs,\n *bindables,\n directives=directives,\n )\n\n # Parse through the schema to find all entities with key directive.\n entity_types = get_entity_types(schema)\n has_entities = len(entity_types) > 0\n\n # Add the federation type definitions.\n if has_entities:\n schema = extend_federated_schema(schema, parse(federation_entity_type_defs))\n\n # Add _entities query.\n entity_type = schema.get_type(\"_Entity\")\n if entity_type:\n entity_type = cast(GraphQLUnionType, entity_type)\n entity_type.types = entity_types\n\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_entities\"].resolve = resolve_entities\n\n # Add _service query.\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_service\"].resolve = lambda _service, info: {\"sdl\": sdl}\n\n return schema\n\n\ndef extend_federated_schema(\n schema: GraphQLSchema,\n document_ast: DocumentNode,\n assume_valid: bool = False,\n assume_valid_sdl: bool = False,\n) -> GraphQLSchema:\n extended_schema = extend_schema(\n schema,\n document_ast,\n assume_valid,\n assume_valid_sdl,\n )\n\n for (k, v) in schema.type_map.items():\n resolve_reference = getattr(v, \"__resolve_reference__\", None)\n if resolve_reference and k in extended_schema.type_map:\n setattr(\n extended_schema.type_map[k],\n \"__resolve_reference__\",\n resolve_reference,\n )\n\n return extended_schema\n", "path": "ariadne/contrib/federation/schema.py"}]}
| 1,687 | 547 |
gh_patches_debug_28358 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1517 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode paths break clean_directory on server
Discovered a weird error with the server and Unicode characters in paths. Run the following command and everything seems fine:
mkdocs serve -f mkdocs/tests/integration/unicode/mkdocs.yml
However, once a file is edited and saved, the second attempt at a build results in the following error:
```
Exception in callback <bound method type.poll_tasks of <class 'livereload.handlers.LiveReloadHandler'>>
Traceback (most recent call last):
File "/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/tornado/ioloop.py", line 1209, in _run
return self.callback()
File "/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/livereload/handlers.py", line 67, in poll_tasks
filepath, delay = cls.watcher.examine()
File "/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/livereload/watcher.py", line 73, in examine
func and func()
File "/Users/waylan/Code/mkdocs/mkdocs/commands/serve.py", line 112, in builder
build(config, live_server=live_server, dirty=dirty)
File "/Users/waylan/Code/mkdocs/mkdocs/commands/build.py", line 265, in build
utils.clean_directory(config['site_dir'])
File "/Users/waylan/Code/mkdocs/mkdocs/utils/__init__.py", line 144, in clean_directory
if entry.startswith('.'):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xcc in position 1: ordinal not in range(128)
```
It would seem that somewhere between the call to `utils.clean_directory(config['site_dir'])` on the first build, and the same call on the second build, the `site_dir` config setting is converted from a Unicode string to a byte string.
I'm assuming the server code is the culprit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/commands/serve.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import shutil
5 import tempfile
6
7 from os.path import isfile, join
8 from mkdocs.commands.build import build
9 from mkdocs.config import load_config
10
11 log = logging.getLogger(__name__)
12
13
14 def _get_handler(site_dir, StaticFileHandler):
15
16 from tornado.template import Loader
17
18 class WebHandler(StaticFileHandler):
19
20 def write_error(self, status_code, **kwargs):
21
22 if status_code in (404, 500):
23 error_page = '{}.html'.format(status_code)
24 if isfile(join(site_dir, error_page)):
25 self.write(Loader(site_dir).load(error_page).generate())
26 else:
27 super(WebHandler, self).write_error(status_code, **kwargs)
28
29 return WebHandler
30
31
32 def _livereload(host, port, config, builder, site_dir):
33
34 # We are importing here for anyone that has issues with livereload. Even if
35 # this fails, the --no-livereload alternative should still work.
36 from livereload import Server
37 import livereload.handlers
38
39 class LiveReloadServer(Server):
40
41 def get_web_handlers(self, script):
42 handlers = super(LiveReloadServer, self).get_web_handlers(script)
43 # replace livereload handler
44 return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]
45
46 server = LiveReloadServer()
47
48 # Watch the documentation files, the config file and the theme files.
49 server.watch(config['docs_dir'], builder)
50 server.watch(config['config_file_path'], builder)
51
52 for d in config['theme'].dirs:
53 server.watch(d, builder)
54
55 # Run `serve` plugin events.
56 server = config['plugins'].run_event('serve', server, config=config)
57
58 server.serve(root=site_dir, host=host, port=port, restart_delay=0)
59
60
61 def _static_server(host, port, site_dir):
62
63 # Importing here to seperate the code paths from the --livereload
64 # alternative.
65 from tornado import ioloop
66 from tornado import web
67
68 application = web.Application([
69 (r"/(.*)", _get_handler(site_dir, web.StaticFileHandler), {
70 "path": site_dir,
71 "default_filename": "index.html"
72 }),
73 ])
74 application.listen(port=port, address=host)
75
76 log.info('Running at: http://%s:%s/', host, port)
77 log.info('Hold ctrl+c to quit.')
78 try:
79 ioloop.IOLoop.instance().start()
80 except KeyboardInterrupt:
81 log.info('Stopping server...')
82
83
84 def serve(config_file=None, dev_addr=None, strict=None, theme=None,
85 theme_dir=None, livereload='livereload'):
86 """
87 Start the MkDocs development server
88
89 By default it will serve the documentation on http://localhost:8000/ and
90 it will rebuild the documentation and refresh the page automatically
91 whenever a file is edited.
92 """
93
94 # Create a temporary build directory, and set some options to serve it
95 tempdir = tempfile.mkdtemp()
96
97 def builder():
98 log.info("Building documentation...")
99 config = load_config(
100 config_file=config_file,
101 dev_addr=dev_addr,
102 strict=strict,
103 theme=theme,
104 theme_dir=theme_dir
105 )
106 # Override a few config settings after validation
107 config['site_dir'] = tempdir
108 config['site_url'] = 'http://{0}/'.format(config['dev_addr'])
109
110 live_server = livereload in ['dirty', 'livereload']
111 dirty = livereload == 'dirty'
112 build(config, live_server=live_server, dirty=dirty)
113 return config
114
115 try:
116 # Perform the initial build
117 config = builder()
118
119 host, port = config['dev_addr']
120
121 if livereload in ['livereload', 'dirty']:
122 _livereload(host, port, config, builder, tempdir)
123 else:
124 _static_server(host, port, tempdir)
125 finally:
126 shutil.rmtree(tempdir)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py
--- a/mkdocs/commands/serve.py
+++ b/mkdocs/commands/serve.py
@@ -92,7 +92,9 @@
"""
# Create a temporary build directory, and set some options to serve it
- tempdir = tempfile.mkdtemp()
+ # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode
+ # string is returned. And it makes MkDocs temp dirs easier to identify.
+ site_dir = tempfile.mkdtemp(prefix='mkdocs_')
def builder():
log.info("Building documentation...")
@@ -101,10 +103,10 @@
dev_addr=dev_addr,
strict=strict,
theme=theme,
- theme_dir=theme_dir
+ theme_dir=theme_dir,
+ site_dir=site_dir
)
# Override a few config settings after validation
- config['site_dir'] = tempdir
config['site_url'] = 'http://{0}/'.format(config['dev_addr'])
live_server = livereload in ['dirty', 'livereload']
@@ -119,8 +121,8 @@
host, port = config['dev_addr']
if livereload in ['livereload', 'dirty']:
- _livereload(host, port, config, builder, tempdir)
+ _livereload(host, port, config, builder, site_dir)
else:
- _static_server(host, port, tempdir)
+ _static_server(host, port, site_dir)
finally:
- shutil.rmtree(tempdir)
+ shutil.rmtree(site_dir)
|
{"golden_diff": "diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py\n--- a/mkdocs/commands/serve.py\n+++ b/mkdocs/commands/serve.py\n@@ -92,7 +92,9 @@\n \"\"\"\n \n # Create a temporary build directory, and set some options to serve it\n- tempdir = tempfile.mkdtemp()\n+ # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode\n+ # string is returned. And it makes MkDocs temp dirs easier to identify.\n+ site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n \n def builder():\n log.info(\"Building documentation...\")\n@@ -101,10 +103,10 @@\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n- theme_dir=theme_dir\n+ theme_dir=theme_dir,\n+ site_dir=site_dir\n )\n # Override a few config settings after validation\n- config['site_dir'] = tempdir\n config['site_url'] = 'http://{0}/'.format(config['dev_addr'])\n \n live_server = livereload in ['dirty', 'livereload']\n@@ -119,8 +121,8 @@\n host, port = config['dev_addr']\n \n if livereload in ['livereload', 'dirty']:\n- _livereload(host, port, config, builder, tempdir)\n+ _livereload(host, port, config, builder, site_dir)\n else:\n- _static_server(host, port, tempdir)\n+ _static_server(host, port, site_dir)\n finally:\n- shutil.rmtree(tempdir)\n+ shutil.rmtree(site_dir)\n", "issue": "Unicode paths break clean_directory on server\nDiscovered a weird error with the server and Unicode characters in paths. Run the following command and everything seems fine:\r\n\r\n mkdocs serve -f mkdocs/tests/integration/unicode/mkdocs.yml\r\n\r\nHowever, once a file is edited and saved, the second attempt at a build results in the following error:\r\n\r\n```\r\nException in callback <bound method type.poll_tasks of <class 'livereload.handlers.LiveReloadHandler'>>\r\n Traceback (most recent call last):\r\n File \"/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/tornado/ioloop.py\", line 1209, in _run\r\n return self.callback()\r\n File \"/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/livereload/handlers.py\", line 67, in poll_tasks\r\n filepath, delay = cls.watcher.examine()\r\n File \"/Users/waylan/VirtualEnvs/mkdocs/lib/python2.7/site-packages/livereload/watcher.py\", line 73, in examine\r\n func and func()\r\n File \"/Users/waylan/Code/mkdocs/mkdocs/commands/serve.py\", line 112, in builder\r\n build(config, live_server=live_server, dirty=dirty)\r\n File \"/Users/waylan/Code/mkdocs/mkdocs/commands/build.py\", line 265, in build\r\n utils.clean_directory(config['site_dir'])\r\n File \"/Users/waylan/Code/mkdocs/mkdocs/utils/__init__.py\", line 144, in clean_directory\r\n if entry.startswith('.'):\r\n UnicodeDecodeError: 'ascii' codec can't decode byte 0xcc in position 1: ordinal not in range(128)\r\n```\r\n\r\nIt would seem that somewhere between the call to `utils.clean_directory(config['site_dir'])` on the first build, and the same call on the second build, the `site_dir` config setting is converted from a Unicode string to a byte string.\r\n\r\nI'm assuming the server code is the culprit.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nimport tempfile\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = 
'{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super(WebHandler, self).write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super(LiveReloadServer, self).get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to seperate the code paths from the --livereload\n # alternative.\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload'):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n tempdir = tempfile.mkdtemp()\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir\n )\n # Override a few config settings after validation\n config['site_dir'] = tempdir\n config['site_url'] = 'http://{0}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, tempdir)\n else:\n _static_server(host, port, tempdir)\n finally:\n shutil.rmtree(tempdir)\n", "path": "mkdocs/commands/serve.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nimport tempfile\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code 
in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super(WebHandler, self).write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super(LiveReloadServer, self).get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to seperate the code paths from the --livereload\n # alternative.\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload'):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{0}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py"}]}
| 1,885 | 380 |
gh_patches_debug_34083
|
rasdani/github-patches
|
git_diff
|
Nitrate__Nitrate-532
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace deprecated inspect.getargspec call in log_call
`inspect.getargspec` has been deprecated. Replace it with equivalent function call.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/tcms/xmlrpc/decorators.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import inspect
4 import logging
5
6 from functools import wraps
7
8 from django.conf import settings
9 from kobo.django.xmlrpc.models import XmlRpcLog
10
11
12 __all__ = ('log_call',)
13
14 logger = logging.getLogger('nitrate.xmlrpc')
15
16 if settings.DEBUG:
17 # To avoid pollute XMLRPC logs with those generated during development
18 def create_log(user, method, args):
19 log_msg = 'user: {}, method: {}, args: {}'.format(
20 user.username if hasattr(user, 'username') else user,
21 method,
22 args)
23 logger.debug(log_msg)
24 else:
25 create_log = XmlRpcLog.objects.create
26
27
28 def log_call(*args, **kwargs):
29 """Log XMLRPC-specific invocations
30
31 This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,
32 so that we don't have to wait upstream to make the changes.
33
34 Usage::
35
36 from tcms.core.decorators import log_call
37 @log_call(namespace='TestNamespace')
38 def func(request):
39 return None
40 """
41 namespace = kwargs.get('namespace', '')
42 if namespace:
43 namespace = namespace + '.'
44
45 def decorator(function):
46 argspec = inspect.getargspec(function)
47 # Each XMLRPC method has an HttpRequest argument as the first one,
48 # it'll be ignored in the log.
49 arg_names = argspec.args[1:]
50
51 @wraps(function)
52 def _new_function(request, *args, **kwargs):
53 try:
54 known_args = zip(arg_names, args)
55 unknown_args = list(enumerate(args[len(arg_names):]))
56 keyword_args = [(key, value) for key, value in
57 kwargs.items()
58 if (key, value) not in known_args]
59
60 create_log(user=request.user,
61 method=f'{namespace}{function.__name__}',
62 args=str(known_args + unknown_args + keyword_args))
63 except Exception:
64 pass
65 return function(request, *args, **kwargs)
66
67 return _new_function
68
69 return decorator
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/tcms/xmlrpc/decorators.py b/src/tcms/xmlrpc/decorators.py
--- a/src/tcms/xmlrpc/decorators.py
+++ b/src/tcms/xmlrpc/decorators.py
@@ -28,7 +28,7 @@
def log_call(*args, **kwargs):
"""Log XMLRPC-specific invocations
- This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,
+ This is copied from kobo.django.xmlrpc.decorators to add custom abilities,
so that we don't have to wait upstream to make the changes.
Usage::
@@ -43,7 +43,7 @@
namespace = namespace + '.'
def decorator(function):
- argspec = inspect.getargspec(function)
+ argspec = inspect.getfullargspec(function)
# Each XMLRPC method has an HttpRequest argument as the first one,
# it'll be ignored in the log.
arg_names = argspec.args[1:]
@@ -51,17 +51,19 @@
@wraps(function)
def _new_function(request, *args, **kwargs):
try:
- known_args = zip(arg_names, args)
+ known_args = list(zip(arg_names, args))
unknown_args = list(enumerate(args[len(arg_names):]))
- keyword_args = [(key, value) for key, value in
- kwargs.items()
- if (key, value) not in known_args]
+ keyword_args = [
+ (key, value) for key, value in kwargs.items()
+ if (key, value) not in known_args
+ ]
create_log(user=request.user,
method=f'{namespace}{function.__name__}',
args=str(known_args + unknown_args + keyword_args))
except Exception:
- pass
+ logger.exception(
+ f'Fail to log XMLRPC call on {function.__name__}')
return function(request, *args, **kwargs)
return _new_function
|
{"golden_diff": "diff --git a/src/tcms/xmlrpc/decorators.py b/src/tcms/xmlrpc/decorators.py\n--- a/src/tcms/xmlrpc/decorators.py\n+++ b/src/tcms/xmlrpc/decorators.py\n@@ -28,7 +28,7 @@\n def log_call(*args, **kwargs):\n \"\"\"Log XMLRPC-specific invocations\n \n- This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,\n+ This is copied from kobo.django.xmlrpc.decorators to add custom abilities,\n so that we don't have to wait upstream to make the changes.\n \n Usage::\n@@ -43,7 +43,7 @@\n namespace = namespace + '.'\n \n def decorator(function):\n- argspec = inspect.getargspec(function)\n+ argspec = inspect.getfullargspec(function)\n # Each XMLRPC method has an HttpRequest argument as the first one,\n # it'll be ignored in the log.\n arg_names = argspec.args[1:]\n@@ -51,17 +51,19 @@\n @wraps(function)\n def _new_function(request, *args, **kwargs):\n try:\n- known_args = zip(arg_names, args)\n+ known_args = list(zip(arg_names, args))\n unknown_args = list(enumerate(args[len(arg_names):]))\n- keyword_args = [(key, value) for key, value in\n- kwargs.items()\n- if (key, value) not in known_args]\n+ keyword_args = [\n+ (key, value) for key, value in kwargs.items()\n+ if (key, value) not in known_args\n+ ]\n \n create_log(user=request.user,\n method=f'{namespace}{function.__name__}',\n args=str(known_args + unknown_args + keyword_args))\n except Exception:\n- pass\n+ logger.exception(\n+ f'Fail to log XMLRPC call on {function.__name__}')\n return function(request, *args, **kwargs)\n \n return _new_function\n", "issue": "Replace deprecated inspect.getargspec call in log_call\n`inspect.getargspec` has been deprecated. Replace it with equivalent function call.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport inspect\nimport logging\n\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom kobo.django.xmlrpc.models import XmlRpcLog\n\n\n__all__ = ('log_call',)\n\nlogger = logging.getLogger('nitrate.xmlrpc')\n\nif settings.DEBUG:\n # To avoid pollute XMLRPC logs with those generated during development\n def create_log(user, method, args):\n log_msg = 'user: {}, method: {}, args: {}'.format(\n user.username if hasattr(user, 'username') else user,\n method,\n args)\n logger.debug(log_msg)\nelse:\n create_log = XmlRpcLog.objects.create\n\n\ndef log_call(*args, **kwargs):\n \"\"\"Log XMLRPC-specific invocations\n\n This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,\n so that we don't have to wait upstream to make the changes.\n\n Usage::\n\n from tcms.core.decorators import log_call\n @log_call(namespace='TestNamespace')\n def func(request):\n return None\n \"\"\"\n namespace = kwargs.get('namespace', '')\n if namespace:\n namespace = namespace + '.'\n\n def decorator(function):\n argspec = inspect.getargspec(function)\n # Each XMLRPC method has an HttpRequest argument as the first one,\n # it'll be ignored in the log.\n arg_names = argspec.args[1:]\n\n @wraps(function)\n def _new_function(request, *args, **kwargs):\n try:\n known_args = zip(arg_names, args)\n unknown_args = list(enumerate(args[len(arg_names):]))\n keyword_args = [(key, value) for key, value in\n kwargs.items()\n if (key, value) not in known_args]\n\n create_log(user=request.user,\n method=f'{namespace}{function.__name__}',\n args=str(known_args + unknown_args + keyword_args))\n except Exception:\n pass\n return function(request, *args, **kwargs)\n\n return _new_function\n\n return decorator\n", "path": "src/tcms/xmlrpc/decorators.py"}], "after_files": 
[{"content": "# -*- coding: utf-8 -*-\n\nimport inspect\nimport logging\n\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom kobo.django.xmlrpc.models import XmlRpcLog\n\n\n__all__ = ('log_call',)\n\nlogger = logging.getLogger('nitrate.xmlrpc')\n\nif settings.DEBUG:\n # To avoid pollute XMLRPC logs with those generated during development\n def create_log(user, method, args):\n log_msg = 'user: {}, method: {}, args: {}'.format(\n user.username if hasattr(user, 'username') else user,\n method,\n args)\n logger.debug(log_msg)\nelse:\n create_log = XmlRpcLog.objects.create\n\n\ndef log_call(*args, **kwargs):\n \"\"\"Log XMLRPC-specific invocations\n\n This is copied from kobo.django.xmlrpc.decorators to add custom abilities,\n so that we don't have to wait upstream to make the changes.\n\n Usage::\n\n from tcms.core.decorators import log_call\n @log_call(namespace='TestNamespace')\n def func(request):\n return None\n \"\"\"\n namespace = kwargs.get('namespace', '')\n if namespace:\n namespace = namespace + '.'\n\n def decorator(function):\n argspec = inspect.getfullargspec(function)\n # Each XMLRPC method has an HttpRequest argument as the first one,\n # it'll be ignored in the log.\n arg_names = argspec.args[1:]\n\n @wraps(function)\n def _new_function(request, *args, **kwargs):\n try:\n known_args = list(zip(arg_names, args))\n unknown_args = list(enumerate(args[len(arg_names):]))\n keyword_args = [\n (key, value) for key, value in kwargs.items()\n if (key, value) not in known_args\n ]\n\n create_log(user=request.user,\n method=f'{namespace}{function.__name__}',\n args=str(known_args + unknown_args + keyword_args))\n except Exception:\n logger.exception(\n f'Fail to log XMLRPC call on {function.__name__}')\n return function(request, *args, **kwargs)\n\n return _new_function\n\n return decorator\n", "path": "src/tcms/xmlrpc/decorators.py"}]}
| 868 | 442 |
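For illustration, a minimal self-contained sketch of the `inspect.getargspec` → `inspect.getfullargspec` migration that the golden diff above applies. The decorator and argument names below are invented for the example and are not the actual Nitrate code; the point is only that `getfullargspec` is the non-deprecated equivalent and that `zip()` must be materialized before reuse in Python 3.

```python
import inspect
from functools import wraps


def log_args(function):
    """Log positional argument names, skipping the first (request-like) parameter."""
    # getfullargspec replaces the deprecated getargspec and also understands
    # keyword-only arguments and annotations.
    argspec = inspect.getfullargspec(function)
    arg_names = argspec.args[1:]

    @wraps(function)
    def wrapper(request, *args, **kwargs):
        # zip() returns an iterator in Python 3, so materialize it before reuse.
        known_args = list(zip(arg_names, args))
        print(f"{function.__name__} called with {known_args}")
        return function(request, *args, **kwargs)

    return wrapper


@log_args
def create_case(request, summary, priority=3):
    return summary, priority


create_case("fake-request", "a new test case")
```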
gh_patches_debug_5812
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-713
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in wagtailforms.forms.FormBuilder.formfields
Line 86:
``` python
raise Exception("Unrecognised field type: " + form.field_type)
```
This raises `NameError: global name 'form' is not defined`
Ordinarily the flow of things wouldn't run into this line, but I was caught out when badly naming a form field in the json fixtures.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailforms/forms.py`
Content:
```
1 import django.forms
2 from django.utils.datastructures import SortedDict
3
4
5 class BaseForm(django.forms.Form):
6 def __init__(self, *args, **kwargs):
7 kwargs.setdefault('label_suffix', '')
8 return super(BaseForm, self).__init__(*args, **kwargs)
9
10
11 class FormBuilder(object):
12 def __init__(self, fields):
13 self.fields = fields
14
15 def create_singleline_field(self, field, options):
16 # TODO: This is a default value - it may need to be changed
17 options['max_length'] = 255
18 return django.forms.CharField(**options)
19
20 def create_multiline_field(self, field, options):
21 return django.forms.CharField(widget=django.forms.Textarea, **options)
22
23 def create_date_field(self, field, options):
24 return django.forms.DateField(**options)
25
26 def create_datetime_field(self, field, options):
27 return django.forms.DateTimeField(**options)
28
29 def create_email_field(self, field, options):
30 return django.forms.EmailField(**options)
31
32 def create_url_field(self, field, options):
33 return django.forms.URLField(**options)
34
35 def create_number_field(self, field, options):
36 return django.forms.DecimalField(**options)
37
38 def create_dropdown_field(self, field, options):
39 options['choices'] = map(
40 lambda x: (x.strip(), x.strip()),
41 field.choices.split(',')
42 )
43 return django.forms.ChoiceField(**options)
44
45 def create_radio_field(self, field, options):
46 options['choices'] = map(
47 lambda x: (x.strip(), x.strip()),
48 field.choices.split(',')
49 )
50 return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options)
51
52 def create_checkboxes_field(self, field, options):
53 options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')]
54 options['initial'] = [x.strip() for x in field.default_value.split(',')]
55 return django.forms.MultipleChoiceField(
56 widget=django.forms.CheckboxSelectMultiple, **options
57 )
58
59 def create_checkbox_field(self, field, options):
60 return django.forms.BooleanField(**options)
61
62 FIELD_TYPES = {
63 'singleline': create_singleline_field,
64 'multiline': create_multiline_field,
65 'date': create_date_field,
66 'datetime': create_datetime_field,
67 'email': create_email_field,
68 'url': create_url_field,
69 'number': create_number_field,
70 'dropdown': create_dropdown_field,
71 'radio': create_radio_field,
72 'checkboxes': create_checkboxes_field,
73 'checkbox': create_checkbox_field,
74 }
75
76 @property
77 def formfields(self):
78 formfields = SortedDict()
79
80 for field in self.fields:
81 options = self.get_field_options(field)
82
83 if field.field_type in self.FIELD_TYPES:
84 formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)
85 else:
86 raise Exception("Unrecognised field type: " + form.field_type)
87
88 return formfields
89
90 def get_field_options(self, field):
91 options = {}
92 options['label'] = field.label
93 options['help_text'] = field.help_text
94 options['required'] = field.required
95 options['initial'] = field.default_value
96 return options
97
98 def get_form_class(self):
99 return type('WagtailForm', (BaseForm,), self.formfields)
100
101
102 class SelectDateForm(django.forms.Form):
103 date_from = django.forms.DateTimeField(
104 required=False,
105 widget=django.forms.DateInput(attrs={'placeholder': 'Date from'})
106 )
107 date_to = django.forms.DateTimeField(
108 required=False,
109 widget=django.forms.DateInput(attrs={'placeholder': 'Date to'})
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailforms/forms.py b/wagtail/wagtailforms/forms.py
--- a/wagtail/wagtailforms/forms.py
+++ b/wagtail/wagtailforms/forms.py
@@ -83,7 +83,7 @@
if field.field_type in self.FIELD_TYPES:
formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)
else:
- raise Exception("Unrecognised field type: " + form.field_type)
+ raise Exception("Unrecognised field type: " + field.field_type)
return formfields
|
{"golden_diff": "diff --git a/wagtail/wagtailforms/forms.py b/wagtail/wagtailforms/forms.py\n--- a/wagtail/wagtailforms/forms.py\n+++ b/wagtail/wagtailforms/forms.py\n@@ -83,7 +83,7 @@\n if field.field_type in self.FIELD_TYPES:\n formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)\n else:\n- raise Exception(\"Unrecognised field type: \" + form.field_type)\n+ raise Exception(\"Unrecognised field type: \" + field.field_type)\n \n return formfields\n", "issue": "Typo in wagtailforms.forms.FormBuilder.formfields\nLine 86:\n\n``` python\nraise Exception(\"Unrecognised field type: \" + form.field_type)\n```\n\nThis raises `NameError: global name 'form' is not defined`\n\nOrdinarily the flow of things wouldn't run into this line, but I was caught out when badly naming a form field in the json fixtures.\n\n", "before_files": [{"content": "import django.forms\nfrom django.utils.datastructures import SortedDict\n\n\nclass BaseForm(django.forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('label_suffix', '')\n return super(BaseForm, self).__init__(*args, **kwargs)\n\n\nclass FormBuilder(object):\n def __init__(self, fields):\n self.fields = fields\n\n def create_singleline_field(self, field, options):\n # TODO: This is a default value - it may need to be changed\n options['max_length'] = 255\n return django.forms.CharField(**options)\n\n def create_multiline_field(self, field, options):\n return django.forms.CharField(widget=django.forms.Textarea, **options)\n\n def create_date_field(self, field, options):\n return django.forms.DateField(**options)\n\n def create_datetime_field(self, field, options):\n return django.forms.DateTimeField(**options)\n\n def create_email_field(self, field, options):\n return django.forms.EmailField(**options)\n\n def create_url_field(self, field, options):\n return django.forms.URLField(**options)\n\n def create_number_field(self, field, options):\n return django.forms.DecimalField(**options)\n\n def create_dropdown_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(**options)\n\n def create_radio_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options)\n\n def create_checkboxes_field(self, field, options):\n options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')]\n options['initial'] = [x.strip() for x in field.default_value.split(',')]\n return django.forms.MultipleChoiceField(\n widget=django.forms.CheckboxSelectMultiple, **options\n )\n\n def create_checkbox_field(self, field, options):\n return django.forms.BooleanField(**options)\n\n FIELD_TYPES = {\n 'singleline': create_singleline_field,\n 'multiline': create_multiline_field,\n 'date': create_date_field,\n 'datetime': create_datetime_field,\n 'email': create_email_field,\n 'url': create_url_field,\n 'number': create_number_field,\n 'dropdown': create_dropdown_field,\n 'radio': create_radio_field,\n 'checkboxes': create_checkboxes_field,\n 'checkbox': create_checkbox_field,\n }\n\n @property\n def formfields(self):\n formfields = SortedDict()\n\n for field in self.fields:\n options = self.get_field_options(field)\n\n if field.field_type in self.FIELD_TYPES:\n formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)\n else:\n raise Exception(\"Unrecognised 
field type: \" + form.field_type)\n\n return formfields\n\n def get_field_options(self, field):\n options = {}\n options['label'] = field.label\n options['help_text'] = field.help_text\n options['required'] = field.required\n options['initial'] = field.default_value\n return options\n\n def get_form_class(self):\n return type('WagtailForm', (BaseForm,), self.formfields)\n\n\nclass SelectDateForm(django.forms.Form):\n date_from = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date from'})\n )\n date_to = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date to'})\n )\n", "path": "wagtail/wagtailforms/forms.py"}], "after_files": [{"content": "import django.forms\nfrom django.utils.datastructures import SortedDict\n\n\nclass BaseForm(django.forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('label_suffix', '')\n return super(BaseForm, self).__init__(*args, **kwargs)\n\n\nclass FormBuilder(object):\n def __init__(self, fields):\n self.fields = fields\n\n def create_singleline_field(self, field, options):\n # TODO: This is a default value - it may need to be changed\n options['max_length'] = 255\n return django.forms.CharField(**options)\n\n def create_multiline_field(self, field, options):\n return django.forms.CharField(widget=django.forms.Textarea, **options)\n\n def create_date_field(self, field, options):\n return django.forms.DateField(**options)\n\n def create_datetime_field(self, field, options):\n return django.forms.DateTimeField(**options)\n\n def create_email_field(self, field, options):\n return django.forms.EmailField(**options)\n\n def create_url_field(self, field, options):\n return django.forms.URLField(**options)\n\n def create_number_field(self, field, options):\n return django.forms.DecimalField(**options)\n\n def create_dropdown_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(**options)\n\n def create_radio_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options)\n\n def create_checkboxes_field(self, field, options):\n options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')]\n options['initial'] = [x.strip() for x in field.default_value.split(',')]\n return django.forms.MultipleChoiceField(\n widget=django.forms.CheckboxSelectMultiple, **options\n )\n\n def create_checkbox_field(self, field, options):\n return django.forms.BooleanField(**options)\n\n FIELD_TYPES = {\n 'singleline': create_singleline_field,\n 'multiline': create_multiline_field,\n 'date': create_date_field,\n 'datetime': create_datetime_field,\n 'email': create_email_field,\n 'url': create_url_field,\n 'number': create_number_field,\n 'dropdown': create_dropdown_field,\n 'radio': create_radio_field,\n 'checkboxes': create_checkboxes_field,\n 'checkbox': create_checkbox_field,\n }\n\n @property\n def formfields(self):\n formfields = SortedDict()\n\n for field in self.fields:\n options = self.get_field_options(field)\n\n if field.field_type in self.FIELD_TYPES:\n formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)\n else:\n raise Exception(\"Unrecognised field type: \" + field.field_type)\n\n return formfields\n\n def get_field_options(self, field):\n options = {}\n 
options['label'] = field.label\n options['help_text'] = field.help_text\n options['required'] = field.required\n options['initial'] = field.default_value\n return options\n\n def get_form_class(self):\n return type('WagtailForm', (BaseForm,), self.formfields)\n\n\nclass SelectDateForm(django.forms.Form):\n date_from = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date from'})\n )\n date_to = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date to'})\n )\n", "path": "wagtail/wagtailforms/forms.py"}]}
| 1,375 | 134 |
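As a standalone illustration of the one-line fix above — the exception message must reference the loop variable `field`, not the undefined name `form`, otherwise the error path itself raises `NameError`. The reduced dispatch loop below uses invented field names and maps types to plain strings rather than Django form classes, purely to show the pattern.

```python
FIELD_TYPES = {
    "singleline": "CharField",
    "checkbox": "BooleanField",
}


def build_formfields(fields):
    formfields = {}
    for field in fields:
        if field["field_type"] in FIELD_TYPES:
            formfields[field["name"]] = FIELD_TYPES[field["field_type"]]
        else:
            # Referencing an undefined name such as `form` here would raise
            # NameError instead of the intended, readable Exception.
            raise Exception("Unrecognised field type: " + field["field_type"])
    return formfields


print(build_formfields([{"name": "email", "field_type": "singleline"}]))
```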
gh_patches_debug_23126
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2635
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider riteaid is broken
During the global build at 2021-08-11-14-42-19, spider **riteaid** failed with **1234 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/riteaid.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/riteaid.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/riteaid.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/riteaid.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5 from locations.items import GeojsonPointItem
6
7 class RiteAidSpider(scrapy.Spider):
8 name = "riteaid"
9 allowed_domains = ["riteaid.com"]
10 start_urls = (
11 'https://locations.riteaid.com/',
12 )
13
14 def store_hours(self, store_hours):
15 day_groups = []
16 this_day_group = None
17 for day_info in store_hours:
18 day = day_info['day'][:2].title()
19
20 hour_intervals = []
21 for interval in day_info['intervals']:
22 f_time = str(interval['start']).zfill(4)
23 t_time = str(interval['end']).zfill(4)
24 hour_intervals.append('{}:{}-{}:{}'.format(
25 f_time[0:2],
26 f_time[2:4],
27 t_time[0:2],
28 t_time[2:4],
29 ))
30 hours = ','.join(hour_intervals)
31
32 if not this_day_group:
33 this_day_group = {
34 'from_day': day,
35 'to_day': day,
36 'hours': hours
37 }
38 elif this_day_group['hours'] != hours:
39 day_groups.append(this_day_group)
40 this_day_group = {
41 'from_day': day,
42 'to_day': day,
43 'hours': hours
44 }
45 elif this_day_group['hours'] == hours:
46 this_day_group['to_day'] = day
47
48 day_groups.append(this_day_group)
49
50 opening_hours = ""
51 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
52 opening_hours = '24/7'
53 else:
54 for day_group in day_groups:
55 if day_group['from_day'] == day_group['to_day']:
56 opening_hours += '{from_day} {hours}; '.format(**day_group)
57 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
58 opening_hours += '{hours}; '.format(**day_group)
59 else:
60 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
61 opening_hours = opening_hours[:-2]
62
63 return opening_hours
64
65 def parse(self, response):
66 urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
67
68 for url in urls:
69 if len(url.split('/')) == 3:
70 yield scrapy.Request(response.urljoin(url), callback=self.parse_city)
71 elif len(url.split('/')) == 4:
72 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
73 else:
74 yield scrapy.Request(response.urljoin(url), callback=self.parse_state)
75
76 def parse_state(self, response):
77 state_urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
78
79 for url in state_urls:
80 if len(url.split('/')) == 5:
81 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
82 else:
83 yield scrapy.Request(response.urljoin(url), callback=self.parse_city)
84
85 def parse_city(self, response):
86 city_urls = response.xpath('//a[@itemprop="url"]/@href').extract()
87
88 for url in city_urls:
89 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
90
91 def parse_location(self, response):
92 ref = response.xpath('normalize-space(//h1[contains(@itemprop,"name")]/text())').extract_first()
93 brand_elem = response.xpath('//div[@class="alert alert-danger"]/text()').extract_first()
94
95 if brand_elem: #Changed ownership as part of the sale of select Rite Aid stores to Walgreens
96 brand = 'Walgreens'
97 else:
98 brand = re.search(r'([^#//s*]+)', ref).group(1)
99
100 hours_elem = response.xpath('//div[@class="Hours-store"]//div[contains(@class,"c-location-hours-details-wrapper")]/@data-days')
101 if hours_elem: # not shop, only clinic
102 hours = json.loads(hours_elem.extract_first())
103 else:
104 hours = json.loads(response.xpath('//div[contains(@class,"c-location-hours-details-wrapper")]/@data-days').extract_first())
105
106 properties = {
107 'ref': ref,
108 'addr_full': response.xpath('normalize-space(//span[contains(@itemprop,"streetAddress")]/span/text())').extract_first(),
109 'state': response.xpath('normalize-space(//abbr[contains(@itemprop,"addressRegion")]/text())').extract_first(),
110 'city': response.xpath('normalize-space(//span[contains(@itemprop,"addressLocality")]/text())').extract_first(),
111 'postcode': response.xpath('normalize-space(//span[contains(@itemprop,"postalCode")]/text())').extract_first(),
112 'country': response.xpath('normalize-space(//abbr[contains(@itemprop,"addressCountry")]/text())').extract_first(),
113 'phone': response.xpath('//span[contains(@itemprop,"telephone")]/text()').extract_first(),
114 'lat': float(response.xpath('//meta[contains(@itemprop,"latitude")]/@content').extract_first()),
115 'lon' : float(response.xpath('//meta[contains(@itemprop,"longitude")]/@content').extract_first()),
116 'website': response.url,
117 'opening_hours': self.store_hours(hours),
118 'brand': brand.strip(),
119 }
120
121 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/riteaid.py b/locations/spiders/riteaid.py
--- a/locations/spiders/riteaid.py
+++ b/locations/spiders/riteaid.py
@@ -8,7 +8,7 @@
name = "riteaid"
allowed_domains = ["riteaid.com"]
start_urls = (
- 'https://locations.riteaid.com/',
+ 'https://www.riteaid.com/locations/',
)
def store_hours(self, store_hours):
@@ -77,6 +77,9 @@
state_urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for url in state_urls:
+ if url == 'https://www.riteaid.com/locations/pa/philadelphia.html':
+ # As of 2021-08-21, This URL 500's reliably, so skipping it.
+ continue
if len(url.split('/')) == 5:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
else:
@@ -118,4 +121,4 @@
'brand': brand.strip(),
}
- yield GeojsonPointItem(**properties)
\ No newline at end of file
+ yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/riteaid.py b/locations/spiders/riteaid.py\n--- a/locations/spiders/riteaid.py\n+++ b/locations/spiders/riteaid.py\n@@ -8,7 +8,7 @@\n name = \"riteaid\"\n allowed_domains = [\"riteaid.com\"]\n start_urls = (\n- 'https://locations.riteaid.com/',\n+ 'https://www.riteaid.com/locations/',\n )\n \n def store_hours(self, store_hours):\n@@ -77,6 +77,9 @@\n state_urls = response.xpath('//a[@class=\"c-directory-list-content-item-link\"]/@href').extract()\n \n for url in state_urls:\n+ if url == 'https://www.riteaid.com/locations/pa/philadelphia.html':\n+ # As of 2021-08-21, This URL 500's reliably, so skipping it.\n+ continue\n if len(url.split('/')) == 5:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n else:\n@@ -118,4 +121,4 @@\n 'brand': brand.strip(),\n }\n \n- yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider riteaid is broken\nDuring the global build at 2021-08-11-14-42-19, spider **riteaid** failed with **1234 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/riteaid.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/riteaid.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/riteaid.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\n\nclass RiteAidSpider(scrapy.Spider):\n name = \"riteaid\"\n allowed_domains = [\"riteaid.com\"]\n start_urls = (\n 'https://locations.riteaid.com/',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day_info in store_hours:\n day = day_info['day'][:2].title()\n\n hour_intervals = []\n for interval in day_info['intervals']:\n f_time = str(interval['start']).zfill(4)\n t_time = str(interval['end']).zfill(4)\n hour_intervals.append('{}:{}-{}:{}'.format(\n f_time[0:2],\n f_time[2:4],\n t_time[0:2],\n t_time[2:4],\n ))\n hours = ','.join(hour_intervals)\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"c-directory-list-content-item-link\"]/@href').extract()\n\n for url in urls:\n if len(url.split('/')) == 3:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_city)\n elif len(url.split('/')) == 4:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n else:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_state)\n\n def 
parse_state(self, response):\n state_urls = response.xpath('//a[@class=\"c-directory-list-content-item-link\"]/@href').extract()\n\n for url in state_urls:\n if len(url.split('/')) == 5:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n else:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_city)\n\n def parse_city(self, response):\n city_urls = response.xpath('//a[@itemprop=\"url\"]/@href').extract()\n\n for url in city_urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n\n def parse_location(self, response):\n ref = response.xpath('normalize-space(//h1[contains(@itemprop,\"name\")]/text())').extract_first()\n brand_elem = response.xpath('//div[@class=\"alert alert-danger\"]/text()').extract_first()\n\n if brand_elem: #Changed ownership as part of the sale of select Rite Aid stores to Walgreens\n brand = 'Walgreens'\n else:\n brand = re.search(r'([^#//s*]+)', ref).group(1)\n\n hours_elem = response.xpath('//div[@class=\"Hours-store\"]//div[contains(@class,\"c-location-hours-details-wrapper\")]/@data-days')\n if hours_elem: # not shop, only clinic\n hours = json.loads(hours_elem.extract_first())\n else:\n hours = json.loads(response.xpath('//div[contains(@class,\"c-location-hours-details-wrapper\")]/@data-days').extract_first())\n\n properties = {\n 'ref': ref,\n 'addr_full': response.xpath('normalize-space(//span[contains(@itemprop,\"streetAddress\")]/span/text())').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[contains(@itemprop,\"addressRegion\")]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[contains(@itemprop,\"addressLocality\")]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[contains(@itemprop,\"postalCode\")]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[contains(@itemprop,\"addressCountry\")]/text())').extract_first(),\n 'phone': response.xpath('//span[contains(@itemprop,\"telephone\")]/text()').extract_first(),\n 'lat': float(response.xpath('//meta[contains(@itemprop,\"latitude\")]/@content').extract_first()),\n 'lon' : float(response.xpath('//meta[contains(@itemprop,\"longitude\")]/@content').extract_first()),\n 'website': response.url,\n 'opening_hours': self.store_hours(hours),\n 'brand': brand.strip(),\n }\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/riteaid.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\n\nclass RiteAidSpider(scrapy.Spider):\n name = \"riteaid\"\n allowed_domains = [\"riteaid.com\"]\n start_urls = (\n 'https://www.riteaid.com/locations/',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day_info in store_hours:\n day = day_info['day'][:2].title()\n\n hour_intervals = []\n for interval in day_info['intervals']:\n f_time = str(interval['start']).zfill(4)\n t_time = str(interval['end']).zfill(4)\n hour_intervals.append('{}:{}-{}:{}'.format(\n f_time[0:2],\n f_time[2:4],\n t_time[0:2],\n t_time[2:4],\n ))\n hours = ','.join(hour_intervals)\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n day_groups.append(this_day_group)\n\n 
opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"c-directory-list-content-item-link\"]/@href').extract()\n\n for url in urls:\n if len(url.split('/')) == 3:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_city)\n elif len(url.split('/')) == 4:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n else:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_state)\n\n def parse_state(self, response):\n state_urls = response.xpath('//a[@class=\"c-directory-list-content-item-link\"]/@href').extract()\n\n for url in state_urls:\n if url == 'https://www.riteaid.com/locations/pa/philadelphia.html':\n # As of 2021-08-21, This URL 500's reliably, so skipping it.\n continue\n if len(url.split('/')) == 5:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n else:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_city)\n\n def parse_city(self, response):\n city_urls = response.xpath('//a[@itemprop=\"url\"]/@href').extract()\n\n for url in city_urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n\n def parse_location(self, response):\n ref = response.xpath('normalize-space(//h1[contains(@itemprop,\"name\")]/text())').extract_first()\n brand_elem = response.xpath('//div[@class=\"alert alert-danger\"]/text()').extract_first()\n\n if brand_elem: #Changed ownership as part of the sale of select Rite Aid stores to Walgreens\n brand = 'Walgreens'\n else:\n brand = re.search(r'([^#//s*]+)', ref).group(1)\n\n hours_elem = response.xpath('//div[@class=\"Hours-store\"]//div[contains(@class,\"c-location-hours-details-wrapper\")]/@data-days')\n if hours_elem: # not shop, only clinic\n hours = json.loads(hours_elem.extract_first())\n else:\n hours = json.loads(response.xpath('//div[contains(@class,\"c-location-hours-details-wrapper\")]/@data-days').extract_first())\n\n properties = {\n 'ref': ref,\n 'addr_full': response.xpath('normalize-space(//span[contains(@itemprop,\"streetAddress\")]/span/text())').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[contains(@itemprop,\"addressRegion\")]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[contains(@itemprop,\"addressLocality\")]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[contains(@itemprop,\"postalCode\")]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[contains(@itemprop,\"addressCountry\")]/text())').extract_first(),\n 'phone': response.xpath('//span[contains(@itemprop,\"telephone\")]/text()').extract_first(),\n 'lat': float(response.xpath('//meta[contains(@itemprop,\"latitude\")]/@content').extract_first()),\n 'lon' : float(response.xpath('//meta[contains(@itemprop,\"longitude\")]/@content').extract_first()),\n 'website': response.url,\n 'opening_hours': self.store_hours(hours),\n 'brand': brand.strip(),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": 
"locations/spiders/riteaid.py"}]}
| 1,890 | 290 |
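The patch above does two things: it points `start_urls` at the current locations directory and it skips one directory page that reliably returns a server error. A reduced sketch of that "skip a known-bad URL" pattern is shown below, assuming Scrapy is installed; the domain, selector, and skipped URL are placeholders, not Rite Aid's real pages.

```python
import scrapy


class ExampleDirectorySpider(scrapy.Spider):
    """Reduced sketch of skipping a known-bad directory page during a crawl."""

    name = "example_directory"
    start_urls = ["https://www.example.com/locations/"]

    # URLs that are known to return server errors and would otherwise abort the crawl.
    SKIP_URLS = {"https://www.example.com/locations/pa/philadelphia.html"}

    def parse(self, response):
        for url in response.xpath('//a[@class="directory-link"]/@href').extract():
            if url in self.SKIP_URLS:
                continue  # skip pages that reliably fail instead of retrying them
            yield scrapy.Request(response.urljoin(url), callback=self.parse_location)

    def parse_location(self, response):
        yield {"website": response.url}
```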
gh_patches_debug_29536
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-2067
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal server error in promotion link
https://open-event-dev.herokuapp.com/events/110/tickets/promotion/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/views/admin/models_views/ticket_sales.py`
Content:
```
1 import flask_login
2 import pycountry
3 from flask import redirect
4 from flask import request
5 from flask import url_for
6 from flask_admin import BaseView, expose
7
8 from app import get_settings
9 from app.helpers.cache import cache
10 from app.helpers.data_getter import DataGetter
11 from app.helpers.ticketing import TicketingManager
12 from app.models.ticket import Ticket
13
14 class TicketSalesView(BaseView):
15 @cache.memoize(50)
16 def get_ticket(self, ticket_id):
17 return Ticket.query.get(ticket_id)
18
19 @expose('/')
20 @flask_login.login_required
21 def display_ticket_stats(self, event_id):
22 event = DataGetter.get_event(event_id)
23 orders = TicketingManager.get_orders(event_id)
24
25 completed_count = 0
26 completed_amount = 0
27 tickets_count = 0
28
29 orders_summary = {
30 'completed': {
31 'class': 'success',
32 'tickets_count': 0,
33 'orders_count': 0,
34 'total_sales': 0
35 },
36 'pending': {
37 'class': 'warning',
38 'tickets_count': 0,
39 'orders_count': 0,
40 'total_sales': 0
41 },
42 'expired': {
43 'class': 'danger',
44 'tickets_count': 0,
45 'orders_count': 0,
46 'total_sales': 0
47 }
48 }
49
50 tickets_summary = {}
51
52 for ticket in event.tickets:
53 tickets_summary[str(ticket.id)] = {
54 'name': ticket.name,
55 'quantity': ticket.quantity,
56 'completed': {
57 'tickets_count': 0,
58 'sales': 0
59 },
60 'pending': {
61 'tickets_count': 0,
62 'sales': 0
63 },
64 'expired': {
65 'class': 'danger',
66 'tickets_count': 0,
67 'sales': 0
68 }
69 }
70
71 for order in orders:
72 if order.status == 'initialized':
73 order.status = 'pending'
74 orders_summary[str(order.status)]['orders_count'] += 1
75 orders_summary[str(order.status)]['total_sales'] += order.amount
76 for order_ticket in order.tickets:
77 orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity
78 ticket = self.get_ticket(order_ticket.ticket_id)
79 tickets_summary[str(ticket.id)][str(order.status)]['tickets_count'] += order_ticket.quantity
80 tickets_summary[str(ticket.id)][str(order.status)]['sales'] += order_ticket.quantity * ticket.price
81
82 return self.render('/gentelella/admin/event/tickets/tickets.html', event=event, event_id=event_id,
83 orders_summary=orders_summary, tickets_summary=tickets_summary)
84
85 @expose('/orders/')
86 @flask_login.login_required
87 def display_orders(self, event_id):
88 event = DataGetter.get_event(event_id)
89 orders = TicketingManager.get_orders(event_id)
90 return self.render('/gentelella/admin/event/tickets/orders.html', event=event, event_id=event_id, orders=orders)
91
92 @expose('/attendees/')
93 @flask_login.login_required
94 def display_attendees(self, event_id):
95 event = DataGetter.get_event(event_id)
96 orders = TicketingManager.get_orders(event_id)
97 return self.render('/gentelella/admin/event/tickets/attendees.html', event=event,
98 event_id=event_id, orders=orders)
99
100 @expose('/add-order/', methods=('GET', 'POST'))
101 @flask_login.login_required
102 def add_order(self, event_id):
103
104 if request.method == 'POST':
105 order = TicketingManager.create_order(request.form, True)
106 return redirect(url_for('.proceed_order', event_id=event_id, order_identifier=order.identifier))
107
108 event = DataGetter.get_event(event_id)
109 return self.render('/gentelella/admin/event/tickets/add_order.html', event=event, event_id=event_id)
110
111 @expose('/<order_identifier>/', methods=('GET',))
112 def proceed_order(self, event_id, order_identifier):
113 order = TicketingManager.get_order_by_identifier(order_identifier)
114 if order.status == 'completed':
115 return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
116 return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
117 countries=list(pycountry.countries),
118 from_organizer=True,
119 pay_via=order.paid_via,
120 stripe_publishable_key=get_settings()['stripe_publishable_key'])
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/views/admin/models_views/ticket_sales.py b/app/views/admin/models_views/ticket_sales.py
--- a/app/views/admin/models_views/ticket_sales.py
+++ b/app/views/admin/models_views/ticket_sales.py
@@ -1,6 +1,6 @@
import flask_login
import pycountry
-from flask import redirect
+from flask import redirect, flash
from flask import request
from flask import url_for
from flask_admin import BaseView, expose
@@ -111,10 +111,17 @@
@expose('/<order_identifier>/', methods=('GET',))
def proceed_order(self, event_id, order_identifier):
order = TicketingManager.get_order_by_identifier(order_identifier)
- if order.status == 'completed':
- return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
- return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
- countries=list(pycountry.countries),
- from_organizer=True,
- pay_via=order.paid_via,
- stripe_publishable_key=get_settings()['stripe_publishable_key'])
+ if order:
+ if self.is_order_completed(order):
+ return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
+ return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
+ countries=list(pycountry.countries),
+ from_organizer=True,
+ pay_via=order.paid_via,
+ stripe_publishable_key=get_settings()['stripe_publishable_key'])
+ flash("Can't find order", 'warning')
+ return redirect(url_for('.display_ticket_stats', event_id=event_id))
+
+ @staticmethod
+ def is_order_completed(order):
+ return order.status == 'completed'
|
{"golden_diff": "diff --git a/app/views/admin/models_views/ticket_sales.py b/app/views/admin/models_views/ticket_sales.py\n--- a/app/views/admin/models_views/ticket_sales.py\n+++ b/app/views/admin/models_views/ticket_sales.py\n@@ -1,6 +1,6 @@\n import flask_login\n import pycountry\n-from flask import redirect\n+from flask import redirect, flash\n from flask import request\n from flask import url_for\n from flask_admin import BaseView, expose\n@@ -111,10 +111,17 @@\n @expose('/<order_identifier>/', methods=('GET',))\n def proceed_order(self, event_id, order_identifier):\n order = TicketingManager.get_order_by_identifier(order_identifier)\n- if order.status == 'completed':\n- return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n- return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n- countries=list(pycountry.countries),\n- from_organizer=True,\n- pay_via=order.paid_via,\n- stripe_publishable_key=get_settings()['stripe_publishable_key'])\n+ if order:\n+ if self.is_order_completed(order):\n+ return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n+ return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n+ countries=list(pycountry.countries),\n+ from_organizer=True,\n+ pay_via=order.paid_via,\n+ stripe_publishable_key=get_settings()['stripe_publishable_key'])\n+ flash(\"Can't find order\", 'warning')\n+ return redirect(url_for('.display_ticket_stats', event_id=event_id))\n+\n+ @staticmethod\n+ def is_order_completed(order):\n+ return order.status == 'completed'\n", "issue": "Internal server error in promotion link\nhttps://open-event-dev.herokuapp.com/events/110/tickets/promotion/\n\n", "before_files": [{"content": "import flask_login\nimport pycountry\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin import BaseView, expose\n\nfrom app import get_settings\nfrom app.helpers.cache import cache\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.ticket import Ticket\n\nclass TicketSalesView(BaseView):\n @cache.memoize(50)\n def get_ticket(self, ticket_id):\n return Ticket.query.get(ticket_id)\n\n @expose('/')\n @flask_login.login_required\n def display_ticket_stats(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n\n completed_count = 0\n completed_amount = 0\n tickets_count = 0\n\n orders_summary = {\n 'completed': {\n 'class': 'success',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'pending': {\n 'class': 'warning',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n }\n }\n\n tickets_summary = {}\n\n for ticket in event.tickets:\n tickets_summary[str(ticket.id)] = {\n 'name': ticket.name,\n 'quantity': ticket.quantity,\n 'completed': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'pending': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'sales': 0\n }\n }\n\n for order in orders:\n if order.status == 'initialized':\n order.status = 'pending'\n orders_summary[str(order.status)]['orders_count'] += 1\n orders_summary[str(order.status)]['total_sales'] += order.amount\n for order_ticket in order.tickets:\n 
orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity\n ticket = self.get_ticket(order_ticket.ticket_id)\n tickets_summary[str(ticket.id)][str(order.status)]['tickets_count'] += order_ticket.quantity\n tickets_summary[str(ticket.id)][str(order.status)]['sales'] += order_ticket.quantity * ticket.price\n\n return self.render('/gentelella/admin/event/tickets/tickets.html', event=event, event_id=event_id,\n orders_summary=orders_summary, tickets_summary=tickets_summary)\n\n @expose('/orders/')\n @flask_login.login_required\n def display_orders(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/orders.html', event=event, event_id=event_id, orders=orders)\n\n @expose('/attendees/')\n @flask_login.login_required\n def display_attendees(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/attendees.html', event=event,\n event_id=event_id, orders=orders)\n\n @expose('/add-order/', methods=('GET', 'POST'))\n @flask_login.login_required\n def add_order(self, event_id):\n\n if request.method == 'POST':\n order = TicketingManager.create_order(request.form, True)\n return redirect(url_for('.proceed_order', event_id=event_id, order_identifier=order.identifier))\n\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/tickets/add_order.html', event=event, event_id=event_id)\n\n @expose('/<order_identifier>/', methods=('GET',))\n def proceed_order(self, event_id, order_identifier):\n order = TicketingManager.get_order_by_identifier(order_identifier)\n if order.status == 'completed':\n return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n countries=list(pycountry.countries),\n from_organizer=True,\n pay_via=order.paid_via,\n stripe_publishable_key=get_settings()['stripe_publishable_key'])\n", "path": "app/views/admin/models_views/ticket_sales.py"}], "after_files": [{"content": "import flask_login\nimport pycountry\nfrom flask import redirect, flash\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin import BaseView, expose\n\nfrom app import get_settings\nfrom app.helpers.cache import cache\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.ticket import Ticket\n\nclass TicketSalesView(BaseView):\n @cache.memoize(50)\n def get_ticket(self, ticket_id):\n return Ticket.query.get(ticket_id)\n\n @expose('/')\n @flask_login.login_required\n def display_ticket_stats(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n\n completed_count = 0\n completed_amount = 0\n tickets_count = 0\n\n orders_summary = {\n 'completed': {\n 'class': 'success',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'pending': {\n 'class': 'warning',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n }\n }\n\n tickets_summary = {}\n\n for ticket in event.tickets:\n tickets_summary[str(ticket.id)] = {\n 'name': ticket.name,\n 'quantity': ticket.quantity,\n 'completed': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'pending': {\n 'tickets_count': 0,\n 
'sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'sales': 0\n }\n }\n\n for order in orders:\n if order.status == 'initialized':\n order.status = 'pending'\n orders_summary[str(order.status)]['orders_count'] += 1\n orders_summary[str(order.status)]['total_sales'] += order.amount\n for order_ticket in order.tickets:\n orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity\n ticket = self.get_ticket(order_ticket.ticket_id)\n tickets_summary[str(ticket.id)][str(order.status)]['tickets_count'] += order_ticket.quantity\n tickets_summary[str(ticket.id)][str(order.status)]['sales'] += order_ticket.quantity * ticket.price\n\n return self.render('/gentelella/admin/event/tickets/tickets.html', event=event, event_id=event_id,\n orders_summary=orders_summary, tickets_summary=tickets_summary)\n\n @expose('/orders/')\n @flask_login.login_required\n def display_orders(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/orders.html', event=event, event_id=event_id, orders=orders)\n\n @expose('/attendees/')\n @flask_login.login_required\n def display_attendees(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/attendees.html', event=event,\n event_id=event_id, orders=orders)\n\n @expose('/add-order/', methods=('GET', 'POST'))\n @flask_login.login_required\n def add_order(self, event_id):\n\n if request.method == 'POST':\n order = TicketingManager.create_order(request.form, True)\n return redirect(url_for('.proceed_order', event_id=event_id, order_identifier=order.identifier))\n\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/tickets/add_order.html', event=event, event_id=event_id)\n\n @expose('/<order_identifier>/', methods=('GET',))\n def proceed_order(self, event_id, order_identifier):\n order = TicketingManager.get_order_by_identifier(order_identifier)\n if order:\n if self.is_order_completed(order):\n return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n countries=list(pycountry.countries),\n from_organizer=True,\n pay_via=order.paid_via,\n stripe_publishable_key=get_settings()['stripe_publishable_key'])\n flash(\"Can't find order\", 'warning')\n return redirect(url_for('.display_ticket_stats', event_id=event_id))\n\n @staticmethod\n def is_order_completed(order):\n return order.status == 'completed'\n", "path": "app/views/admin/models_views/ticket_sales.py"}]}
| 1,521 | 411 |
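Editor's note: the tail of the preceding record shows the patched `proceed_order` view returning early when no order matches the identifier, instead of dereferencing `order.status` on `None`. The sketch below is a minimal, self-contained illustration of that guard pattern in plain Flask; the route names and the in-memory `ORDERS` lookup are hypothetical stand-ins for the project's `TicketingManager`, not its real API.

```python
from flask import Flask, flash, redirect, url_for

app = Flask(__name__)
app.secret_key = "dev"  # flash() needs a session secret

# Hypothetical stand-in for TicketingManager.get_order_by_identifier
ORDERS = {"abc123": {"status": "completed"}, "def456": {"status": "pending"}}


def is_order_completed(order):
    # Mirrors the static helper introduced by the patch
    return order["status"] == "completed"


@app.route("/events/<int:event_id>/tickets/")
def display_ticket_stats(event_id):
    return f"ticket stats for event {event_id}"


@app.route("/events/<int:event_id>/tickets/<order_identifier>/")
def proceed_order(event_id, order_identifier):
    order = ORDERS.get(order_identifier)
    if order:
        if is_order_completed(order):
            return f"order {order_identifier} is already paid"
        return f"collect payment for order {order_identifier}"
    # Unknown identifier: report it and bounce back instead of crashing
    flash("Can't find order", "warning")
    return redirect(url_for("display_ticket_stats", event_id=event_id))
```

The behavioural change is confined to the final branch: a missing order now produces a flash message and a redirect rather than an unhandled attribute error.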
gh_patches_debug_31238 | rasdani/github-patches | git_diff | marshmallow-code__webargs-832 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dealing with empty values in `DelimitedFieldMixin`
`DelimitedList(String())` deserializes "a,,c" as `["a", "", "c"]`.
I guess this meets user expectations.
My expectation with integers would be that
`DelimitedList(Integer(allow_none=True))` deserializes `"1,,3"` as `[1,None,3]`
but it errors.
The reason for this is that when the string is split, it is turned into `["1", "", "3"]`. This is why it works in the string case.
I'm not sure this was really intended. It may be a side effect of the `split` function that happens to do well with strings.
We could change that to replace empty values with `None`. But it would break the string use case, unless the user overloads `String` field to deserialize `None` as `""`.
Likewise, users may already overload `Integer` to deserialize `""` as `None` and no modification is required to `DelimitedFieldMixin`.
Just been caught by this and wondering out loud. Advice welcome.
In any case, there is an intrinsic limitation in the delimited string format: one can't distinguish empty string from missing value (as opposed to a JSON payload). It is not clear to me how OpenAPI (for instance) deals with the case of an empty element in an array (in a query argument).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/fields.py`
Content:
```
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15 """
16 from __future__ import annotations
17
18 import marshmallow as ma
19
20 # Expose all fields from marshmallow.fields.
21 from marshmallow.fields import * # noqa: F40
22
23 __all__ = ["DelimitedList", "DelimitedTuple"] + ma.fields.__all__
24
25
26 # TODO: remove custom `Nested` in the next major release
27 #
28 # the `Nested` class is only needed on versions of marshmallow prior to v3.15.0
29 # in that version, `ma.fields.Nested` gained the ability to consume dict inputs
30 # prior to that, this subclass adds this capability
31 #
32 # if we drop support for ma.__version_info__ < (3, 15) we can do this
33 class Nested(ma.fields.Nested): # type: ignore[no-redef]
34 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
35 the first argument, which will be converted to a `marshmallow.Schema`.
36
37 .. note::
38
39 The schema class here will always be `marshmallow.Schema`, regardless
40 of whether a custom schema class is set on the parser. Pass an explicit schema
41 class if necessary.
42 """
43
44 def __init__(self, nested, *args, **kwargs):
45 if isinstance(nested, dict):
46 nested = ma.Schema.from_dict(nested)
47 super().__init__(nested, *args, **kwargs)
48
49
50 class DelimitedFieldMixin:
51 """
52 This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
53 which split on a pre-specified delimiter. By default, the delimiter will be ","
54
55 Because we want the MRO to reach this class before the List or Tuple class,
56 it must be listed first in the superclasses
57
58 For example, a DelimitedList-like type can be defined like so:
59
60 >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
61 >>> pass
62 """
63
64 delimiter: str = ","
65 # delimited fields set is_multiple=False for webargs.core.is_multiple
66 is_multiple: bool = False
67
68 def _serialize(self, value, attr, obj, **kwargs):
69 # serializing will start with parent-class serialization, so that we correctly
70 # output lists of non-primitive types, e.g. DelimitedList(DateTime)
71 return self.delimiter.join(
72 format(each) for each in super()._serialize(value, attr, obj, **kwargs)
73 )
74
75 def _deserialize(self, value, attr, data, **kwargs):
76 # attempting to deserialize from a non-string source is an error
77 if not isinstance(value, (str, bytes)):
78 raise self.make_error("invalid")
79 values = value.split(self.delimiter) if value else []
80 return super()._deserialize(values, attr, data, **kwargs)
81
82
83 class DelimitedList(DelimitedFieldMixin, ma.fields.List):
84 """A field which is similar to a List, but takes its input as a delimited
85 string (e.g. "foo,bar,baz").
86
87 Like List, it can be given a nested field type which it will use to
88 de/serialize each element of the list.
89
90 :param Field cls_or_instance: A field class or instance.
91 :param str delimiter: Delimiter between values.
92 """
93
94 default_error_messages = {"invalid": "Not a valid delimited list."}
95
96 def __init__(
97 self,
98 cls_or_instance: ma.fields.Field | type,
99 *,
100 delimiter: str | None = None,
101 **kwargs,
102 ):
103 self.delimiter = delimiter or self.delimiter
104 super().__init__(cls_or_instance, **kwargs)
105
106
107 class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
108 """A field which is similar to a Tuple, but takes its input as a delimited
109 string (e.g. "foo,bar,baz").
110
111 Like Tuple, it can be given a tuple of nested field types which it will use to
112 de/serialize each element of the tuple.
113
114 :param Iterable[Field] tuple_fields: An iterable of field classes or instances.
115 :param str delimiter: Delimiter between values.
116 """
117
118 default_error_messages = {"invalid": "Not a valid delimited tuple."}
119
120 def __init__(self, tuple_fields, *, delimiter: str | None = None, **kwargs):
121 self.delimiter = delimiter or self.delimiter
122 super().__init__(tuple_fields, **kwargs)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -15,6 +15,8 @@
"""
from __future__ import annotations
+import typing
+
import marshmallow as ma
# Expose all fields from marshmallow.fields.
@@ -64,6 +66,8 @@
delimiter: str = ","
# delimited fields set is_multiple=False for webargs.core.is_multiple
is_multiple: bool = False
+ # NOTE: in 8.x this defaults to "" but in 9.x it will be 'missing'
+ empty_value: typing.Any = ""
def _serialize(self, value, attr, obj, **kwargs):
# serializing will start with parent-class serialization, so that we correctly
@@ -77,6 +81,8 @@
if not isinstance(value, (str, bytes)):
raise self.make_error("invalid")
values = value.split(self.delimiter) if value else []
+ # convert empty strings to the empty value; typically "" and therefore a no-op
+ values = [v or self.empty_value for v in values]
return super()._deserialize(values, attr, data, **kwargs)
@@ -117,6 +123,12 @@
default_error_messages = {"invalid": "Not a valid delimited tuple."}
- def __init__(self, tuple_fields, *, delimiter: str | None = None, **kwargs):
+ def __init__(
+ self,
+ tuple_fields,
+ *,
+ delimiter: str | None = None,
+ **kwargs,
+ ):
self.delimiter = delimiter or self.delimiter
super().__init__(tuple_fields, **kwargs)
|
{"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -15,6 +15,8 @@\n \"\"\"\n from __future__ import annotations\n \n+import typing\n+\n import marshmallow as ma\n \n # Expose all fields from marshmallow.fields.\n@@ -64,6 +66,8 @@\n delimiter: str = \",\"\n # delimited fields set is_multiple=False for webargs.core.is_multiple\n is_multiple: bool = False\n+ # NOTE: in 8.x this defaults to \"\" but in 9.x it will be 'missing'\n+ empty_value: typing.Any = \"\"\n \n def _serialize(self, value, attr, obj, **kwargs):\n # serializing will start with parent-class serialization, so that we correctly\n@@ -77,6 +81,8 @@\n if not isinstance(value, (str, bytes)):\n raise self.make_error(\"invalid\")\n values = value.split(self.delimiter) if value else []\n+ # convert empty strings to the empty value; typically \"\" and therefore a no-op\n+ values = [v or self.empty_value for v in values]\n return super()._deserialize(values, attr, data, **kwargs)\n \n \n@@ -117,6 +123,12 @@\n \n default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n \n- def __init__(self, tuple_fields, *, delimiter: str | None = None, **kwargs):\n+ def __init__(\n+ self,\n+ tuple_fields,\n+ *,\n+ delimiter: str | None = None,\n+ **kwargs,\n+ ):\n self.delimiter = delimiter or self.delimiter\n super().__init__(tuple_fields, **kwargs)\n", "issue": "Dealing with empty values in `DelimitedFieldMixin`\n`DelimitedList(String())` deserializes \"a,,c\" as `[\"a\", \"\", \"c\"]`.\r\n\r\nI guess this meets user expectations.\r\n\r\nMy expectation with integers would be that\r\n\r\n`DelimitedList(Integer(allow_none=True))` deserializes `\"1,,3\"` as `[1,None,3]`\r\n\r\nbut it errors.\r\n\r\nThe reason for this is that when the string is split, it is turned into `[\"1\", \"\", \"3\"]`. This is why it works in the string case.\r\n\r\nI'm not sure this was really intended. It may be a side effect of the `split` function that happens to do well with strings.\r\n\r\nWe could change that to replace empty values with `None`. But it would break the string use case, unless the user overloads `String` field to deserialize `None` as `\"\"`.\r\n\r\nLikewise, users may already overload `Integer` to deserialize `\"\"` as `None` and no modification is required to `DelimitedFieldMixin`.\r\n\r\nJust been caught by this and wondering out loud. Advice welcome.\r\n\r\nIn any case, there is an intrinsic limitation in the delimited string format: one can't distinguish empty string from missing value (as opposed to a JSON payload). It is not clear to me how OpenAPI (for instance) deals with the case of an empty element in an array (in a query argument).\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. 
code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\"\"\"\nfrom __future__ import annotations\n\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\n\n__all__ = [\"DelimitedList\", \"DelimitedTuple\"] + ma.fields.__all__\n\n\n# TODO: remove custom `Nested` in the next major release\n#\n# the `Nested` class is only needed on versions of marshmallow prior to v3.15.0\n# in that version, `ma.fields.Nested` gained the ability to consume dict inputs\n# prior to that, this subclass adds this capability\n#\n# if we drop support for ma.__version_info__ < (3, 15) we can do this\nclass Nested(ma.fields.Nested): # type: ignore[no-redef]\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = ma.Schema.from_dict(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedFieldMixin:\n \"\"\"\n This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n which split on a pre-specified delimiter. By default, the delimiter will be \",\"\n\n Because we want the MRO to reach this class before the List or Tuple class,\n it must be listed first in the superclasses\n\n For example, a DelimitedList-like type can be defined like so:\n\n >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n >>> pass\n \"\"\"\n\n delimiter: str = \",\"\n # delimited fields set is_multiple=False for webargs.core.is_multiple\n is_multiple: bool = False\n\n def _serialize(self, value, attr, obj, **kwargs):\n # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj, **kwargs)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n raise self.make_error(\"invalid\")\n values = value.split(self.delimiter) if value else []\n return super()._deserialize(values, attr, data, **kwargs)\n\n\nclass DelimitedList(DelimitedFieldMixin, ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n\n def __init__(\n self,\n cls_or_instance: ma.fields.Field | type,\n *,\n delimiter: str | None = None,\n **kwargs,\n ):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n\nclass DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n string (e.g. 
\"foo,bar,baz\").\n\n Like Tuple, it can be given a tuple of nested field types which it will use to\n de/serialize each element of the tuple.\n\n :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n\n def __init__(self, tuple_fields, *, delimiter: str | None = None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(tuple_fields, **kwargs)\n", "path": "src/webargs/fields.py"}], "after_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\"\"\"\nfrom __future__ import annotations\n\nimport typing\n\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\n\n__all__ = [\"DelimitedList\", \"DelimitedTuple\"] + ma.fields.__all__\n\n\n# TODO: remove custom `Nested` in the next major release\n#\n# the `Nested` class is only needed on versions of marshmallow prior to v3.15.0\n# in that version, `ma.fields.Nested` gained the ability to consume dict inputs\n# prior to that, this subclass adds this capability\n#\n# if we drop support for ma.__version_info__ < (3, 15) we can do this\nclass Nested(ma.fields.Nested): # type: ignore[no-redef]\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = ma.Schema.from_dict(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedFieldMixin:\n \"\"\"\n This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n which split on a pre-specified delimiter. By default, the delimiter will be \",\"\n\n Because we want the MRO to reach this class before the List or Tuple class,\n it must be listed first in the superclasses\n\n For example, a DelimitedList-like type can be defined like so:\n\n >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n >>> pass\n \"\"\"\n\n delimiter: str = \",\"\n # delimited fields set is_multiple=False for webargs.core.is_multiple\n is_multiple: bool = False\n # NOTE: in 8.x this defaults to \"\" but in 9.x it will be 'missing'\n empty_value: typing.Any = \"\"\n\n def _serialize(self, value, attr, obj, **kwargs):\n # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. 
DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj, **kwargs)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n raise self.make_error(\"invalid\")\n values = value.split(self.delimiter) if value else []\n # convert empty strings to the empty value; typically \"\" and therefore a no-op\n values = [v or self.empty_value for v in values]\n return super()._deserialize(values, attr, data, **kwargs)\n\n\nclass DelimitedList(DelimitedFieldMixin, ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n\n def __init__(\n self,\n cls_or_instance: ma.fields.Field | type,\n *,\n delimiter: str | None = None,\n **kwargs,\n ):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n\nclass DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like Tuple, it can be given a tuple of nested field types which it will use to\n de/serialize each element of the tuple.\n\n :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n\n def __init__(\n self,\n tuple_fields,\n *,\n delimiter: str | None = None,\n **kwargs,\n ):\n self.delimiter = delimiter or self.delimiter\n super().__init__(tuple_fields, **kwargs)\n", "path": "src/webargs/fields.py"}]}
| 1,876 | 390 |
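Editor's note: the accepted patch above turns the empty-segment behaviour into a class-level `empty_value` hook (defaulting to `""`). Assuming a webargs release that ships that hook, a user-side subclass can opt into `None` for numeric lists, which is exactly the behaviour the issue asks for; the schema below is only an illustrative sketch.

```python
import marshmallow as ma

from webargs import fields


class NullableIntList(fields.DelimitedList):
    # Empty segments become None instead of "" (relies on the empty_value
    # hook added by the patch above).
    empty_value = None


class ArgsSchema(ma.Schema):
    ids = NullableIntList(fields.Integer(allow_none=True))


schema = ArgsSchema()
print(schema.load({"ids": "1,,3"}))  # {'ids': [1, None, 3]}
print(schema.load({"ids": "4,5"}))   # {'ids': [4, 5]}
```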
gh_patches_debug_15073 | rasdani/github-patches | git_diff | allegro__ralph-3365 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
min_ip, max_ip fields erroring despite being specified via JSON REST API
As of commit 4794c4c04fced29b397b58a7689ff725c20ff6bd I'm having a problem where the JSON rest API won't let me create networks - https://22.0.0.2/api/networks/ (docker container instance of Ralph)
a request sent to that endpoint with the following body
```json
{
"name":"test3",
"address":"2.0.0.0/8",
"remarks":"",
"vlan":1,
"dhcp_broadcast":false,
"reserved_from_beginning":0,
"reserved_from_end":0,
"gateway":null,
"network_environment":null,
"kind":null,
"service_env":null,
"terminators":[
],
"racks":[
],
"dns_servers":[
],
"min_ip":16777216,
"max_ip":33554431
}
```
produces the following result:
```json
{
"min_ip": [
"This field is required."
],
"max_ip": [
"This field is required."
]
}
```
This seems to be a known issue with Django REST Framework, and is possibly a result of min_ip and max_ip being set as non-editable in the model?
The same request works via the form request mechanism.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ralph/networks/api.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.conf import settings
3 from rest_framework.exceptions import ValidationError
4
5 from ralph.api import RalphAPISerializer, RalphAPIViewSet, router
6 from ralph.api.serializers import RalphAPISaveSerializer
7 from ralph.assets.api.serializers import EthernetSerializer
8 from ralph.networks.models import (
9 IPAddress,
10 Network,
11 NetworkEnvironment,
12 NetworkKind
13 )
14
15
16 class NetworkEnvironmentSerializer(RalphAPISerializer):
17 class Meta:
18 model = NetworkEnvironment
19 depth = 1
20
21
22 class NetworkKindSerializer(RalphAPISerializer):
23 class Meta:
24 model = NetworkKind
25 depth = 1
26
27
28 class NetworkSimpleSerializer(RalphAPISerializer):
29 class Meta:
30 model = Network
31 fields = (
32 'id', 'url', 'name', 'remarks', 'vlan', 'dhcp_broadcast', 'parent',
33 'network_environment'
34 )
35
36
37 class NetworkSerializer(RalphAPISerializer):
38 class Meta:
39 model = Network
40 depth = 1
41
42
43 class IPAddressSerializer(RalphAPISerializer):
44 network = NetworkSimpleSerializer()
45 ethernet = EthernetSerializer()
46
47 class Meta:
48 model = IPAddress
49 depth = 1
50 exclude = ('number',)
51
52
53 class IPAddressSaveSerializer(RalphAPISaveSerializer):
54 class Meta:
55 model = IPAddress
56
57 def validate_dhcp_expose(self, value):
58 """
59 Check if dhcp_expose value has changed from True to False.
60 """
61 if (
62 settings.DHCP_ENTRY_FORBID_CHANGE and
63 self.instance and
64 self.instance.dhcp_expose and
65 not value
66 ):
67 raise ValidationError(
68 'Cannot remove entry from DHCP. Use transition to do this.'
69 )
70 return value
71
72
73 class IPAddressViewSet(RalphAPIViewSet):
74 queryset = IPAddress.objects.all()
75 serializer_class = IPAddressSerializer
76 save_serializer_class = IPAddressSaveSerializer
77 prefetch_related = [
78 'ethernet', 'ethernet__base_object', 'ethernet__base_object__tags',
79 'network',
80 ]
81 filter_fields = [
82 'hostname', 'ethernet__base_object', 'network', 'network__address',
83 'status', 'is_public', 'is_management', 'dhcp_expose', 'ethernet__mac',
84 ]
85
86 def destroy(self, request, *args, **kwargs):
87 instance = self.get_object()
88 if instance and instance.dhcp_expose:
89 raise ValidationError(
90 'Could not delete IPAddress when it is exposed in DHCP'
91 )
92 return super().destroy(request, *args, **kwargs)
93
94
95 class NetworkViewSet(RalphAPIViewSet):
96 queryset = Network.objects.all()
97 serializer_class = NetworkSerializer
98 select_related = ['network_environment', 'kind']
99 prefetch_related = ['racks']
100 extended_filter_fields = {
101 # workaround for custom field for address field defined in admin
102 'address': ['address'],
103 }
104
105
106 class NetworkEnvironmentViewSet(RalphAPIViewSet):
107 queryset = NetworkEnvironment.objects.all()
108 serializer_class = NetworkEnvironmentSerializer
109
110
111 class NetworkKindViewSet(RalphAPIViewSet):
112 queryset = NetworkKind.objects.all()
113 serializer_class = NetworkKindSerializer
114
115 router.register(r'ipaddresses', IPAddressViewSet)
116 router.register(r'networks', NetworkViewSet)
117 router.register(r'network-environments', NetworkEnvironmentViewSet)
118 router.register(r'network-kinds', NetworkKindViewSet)
119 urlpatterns = []
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/ralph/networks/api.py b/src/ralph/networks/api.py
--- a/src/ralph/networks/api.py
+++ b/src/ralph/networks/api.py
@@ -34,6 +34,13 @@
)
+class NetworkSaveSerializer(RalphAPISerializer):
+ class Meta:
+ model = Network
+ depth = 1
+ exclude = ('min_ip', 'max_ip')
+
+
class NetworkSerializer(RalphAPISerializer):
class Meta:
model = Network
@@ -95,6 +102,7 @@
class NetworkViewSet(RalphAPIViewSet):
queryset = Network.objects.all()
serializer_class = NetworkSerializer
+ save_serializer_class = NetworkSaveSerializer
select_related = ['network_environment', 'kind']
prefetch_related = ['racks']
extended_filter_fields = {
|
{"golden_diff": "diff --git a/src/ralph/networks/api.py b/src/ralph/networks/api.py\n--- a/src/ralph/networks/api.py\n+++ b/src/ralph/networks/api.py\n@@ -34,6 +34,13 @@\n )\n \n \n+class NetworkSaveSerializer(RalphAPISerializer):\n+ class Meta:\n+ model = Network\n+ depth = 1\n+ exclude = ('min_ip', 'max_ip')\n+\n+\n class NetworkSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n@@ -95,6 +102,7 @@\n class NetworkViewSet(RalphAPIViewSet):\n queryset = Network.objects.all()\n serializer_class = NetworkSerializer\n+ save_serializer_class = NetworkSaveSerializer\n select_related = ['network_environment', 'kind']\n prefetch_related = ['racks']\n extended_filter_fields = {\n", "issue": "min_ip, max_ip fields erroring despite being specified via JSON REST API\nAs of commit 4794c4c04fced29b397b58a7689ff725c20ff6bd I'm having a problem where the JSON rest API won't let me create networks - https://22.0.0.2/api/networks/ (docker container instance of Ralph)\r\na request sent to \r\n```json\r\n{\r\n \"name\":\"test3\",\r\n \"address\":\"2.0.0.0/8\",\r\n \"remarks\":\"\",\r\n \"vlan\":1,\r\n \"dhcp_broadcast\":false,\r\n \"reserved_from_beginning\":0,\r\n \"reserved_from_end\":0,\r\n \"gateway\":null,\r\n \"network_environment\":null,\r\n \"kind\":null,\r\n \"service_env\":null,\r\n \"terminators\":[\r\n\r\n ],\r\n \"racks\":[\r\n\r\n ],\r\n \"dns_servers\":[\r\n\r\n ],\r\n \"min_ip\":16777216,\r\n \"max_ip\":33554431\r\n}\r\n```\r\nproduces the following result:\r\n```json\r\n{\r\n \"min_ip\": [\r\n \"This field is required.\"\r\n ],\r\n \"max_ip\": [\r\n \"This field is required.\"\r\n ]\r\n}\r\n```\r\n\r\nThis seems to be a known issue with Django REST API, and is possibly a result of min_ip and max_ip being set as non-editable in the model? \r\n\r\nThe same request works via the form request mechanism.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom rest_framework.exceptions import ValidationError\n\nfrom ralph.api import RalphAPISerializer, RalphAPIViewSet, router\nfrom ralph.api.serializers import RalphAPISaveSerializer\nfrom ralph.assets.api.serializers import EthernetSerializer\nfrom ralph.networks.models import (\n IPAddress,\n Network,\n NetworkEnvironment,\n NetworkKind\n)\n\n\nclass NetworkEnvironmentSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkEnvironment\n depth = 1\n\n\nclass NetworkKindSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkKind\n depth = 1\n\n\nclass NetworkSimpleSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n fields = (\n 'id', 'url', 'name', 'remarks', 'vlan', 'dhcp_broadcast', 'parent',\n 'network_environment'\n )\n\n\nclass NetworkSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n depth = 1\n\n\nclass IPAddressSerializer(RalphAPISerializer):\n network = NetworkSimpleSerializer()\n ethernet = EthernetSerializer()\n\n class Meta:\n model = IPAddress\n depth = 1\n exclude = ('number',)\n\n\nclass IPAddressSaveSerializer(RalphAPISaveSerializer):\n class Meta:\n model = IPAddress\n\n def validate_dhcp_expose(self, value):\n \"\"\"\n Check if dhcp_expose value has changed from True to False.\n \"\"\"\n if (\n settings.DHCP_ENTRY_FORBID_CHANGE and\n self.instance and\n self.instance.dhcp_expose and\n not value\n ):\n raise ValidationError(\n 'Cannot remove entry from DHCP. 
Use transition to do this.'\n )\n return value\n\n\nclass IPAddressViewSet(RalphAPIViewSet):\n queryset = IPAddress.objects.all()\n serializer_class = IPAddressSerializer\n save_serializer_class = IPAddressSaveSerializer\n prefetch_related = [\n 'ethernet', 'ethernet__base_object', 'ethernet__base_object__tags',\n 'network',\n ]\n filter_fields = [\n 'hostname', 'ethernet__base_object', 'network', 'network__address',\n 'status', 'is_public', 'is_management', 'dhcp_expose', 'ethernet__mac',\n ]\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if instance and instance.dhcp_expose:\n raise ValidationError(\n 'Could not delete IPAddress when it is exposed in DHCP'\n )\n return super().destroy(request, *args, **kwargs)\n\n\nclass NetworkViewSet(RalphAPIViewSet):\n queryset = Network.objects.all()\n serializer_class = NetworkSerializer\n select_related = ['network_environment', 'kind']\n prefetch_related = ['racks']\n extended_filter_fields = {\n # workaround for custom field for address field defined in admin\n 'address': ['address'],\n }\n\n\nclass NetworkEnvironmentViewSet(RalphAPIViewSet):\n queryset = NetworkEnvironment.objects.all()\n serializer_class = NetworkEnvironmentSerializer\n\n\nclass NetworkKindViewSet(RalphAPIViewSet):\n queryset = NetworkKind.objects.all()\n serializer_class = NetworkKindSerializer\n\nrouter.register(r'ipaddresses', IPAddressViewSet)\nrouter.register(r'networks', NetworkViewSet)\nrouter.register(r'network-environments', NetworkEnvironmentViewSet)\nrouter.register(r'network-kinds', NetworkKindViewSet)\nurlpatterns = []\n", "path": "src/ralph/networks/api.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom rest_framework.exceptions import ValidationError\n\nfrom ralph.api import RalphAPISerializer, RalphAPIViewSet, router\nfrom ralph.api.serializers import RalphAPISaveSerializer\nfrom ralph.assets.api.serializers import EthernetSerializer\nfrom ralph.networks.models import (\n IPAddress,\n Network,\n NetworkEnvironment,\n NetworkKind\n)\n\n\nclass NetworkEnvironmentSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkEnvironment\n depth = 1\n\n\nclass NetworkKindSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkKind\n depth = 1\n\n\nclass NetworkSimpleSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n fields = (\n 'id', 'url', 'name', 'remarks', 'vlan', 'dhcp_broadcast', 'parent',\n 'network_environment'\n )\n\n\nclass NetworkSaveSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n depth = 1\n exclude = ('min_ip', 'max_ip')\n\n\nclass NetworkSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n depth = 1\n\n\nclass IPAddressSerializer(RalphAPISerializer):\n network = NetworkSimpleSerializer()\n ethernet = EthernetSerializer()\n\n class Meta:\n model = IPAddress\n depth = 1\n exclude = ('number',)\n\n\nclass IPAddressSaveSerializer(RalphAPISaveSerializer):\n class Meta:\n model = IPAddress\n\n def validate_dhcp_expose(self, value):\n \"\"\"\n Check if dhcp_expose value has changed from True to False.\n \"\"\"\n if (\n settings.DHCP_ENTRY_FORBID_CHANGE and\n self.instance and\n self.instance.dhcp_expose and\n not value\n ):\n raise ValidationError(\n 'Cannot remove entry from DHCP. 
Use transition to do this.'\n )\n return value\n\n\nclass IPAddressViewSet(RalphAPIViewSet):\n queryset = IPAddress.objects.all()\n serializer_class = IPAddressSerializer\n save_serializer_class = IPAddressSaveSerializer\n prefetch_related = [\n 'ethernet', 'ethernet__base_object', 'ethernet__base_object__tags',\n 'network',\n ]\n filter_fields = [\n 'hostname', 'ethernet__base_object', 'network', 'network__address',\n 'status', 'is_public', 'is_management', 'dhcp_expose', 'ethernet__mac',\n ]\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if instance and instance.dhcp_expose:\n raise ValidationError(\n 'Could not delete IPAddress when it is exposed in DHCP'\n )\n return super().destroy(request, *args, **kwargs)\n\n\nclass NetworkViewSet(RalphAPIViewSet):\n queryset = Network.objects.all()\n serializer_class = NetworkSerializer\n save_serializer_class = NetworkSaveSerializer\n select_related = ['network_environment', 'kind']\n prefetch_related = ['racks']\n extended_filter_fields = {\n # workaround for custom field for address field defined in admin\n 'address': ['address'],\n }\n\n\nclass NetworkEnvironmentViewSet(RalphAPIViewSet):\n queryset = NetworkEnvironment.objects.all()\n serializer_class = NetworkEnvironmentSerializer\n\n\nclass NetworkKindViewSet(RalphAPIViewSet):\n queryset = NetworkKind.objects.all()\n serializer_class = NetworkKindSerializer\n\nrouter.register(r'ipaddresses', IPAddressViewSet)\nrouter.register(r'networks', NetworkViewSet)\nrouter.register(r'network-environments', NetworkEnvironmentViewSet)\nrouter.register(r'network-kinds', NetworkKindViewSet)\nurlpatterns = []\n", "path": "src/ralph/networks/api.py"}]}
| 1,569 | 191 |
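Editor's note: the fix above is an instance of a common Django REST Framework pattern, namely pairing a read serializer with a stripped-down write serializer so that derived, non-editable columns such as `min_ip`/`max_ip` are never treated as required input. A generic sketch of the same idea in stock DRF (independent of Ralph's `RalphAPIViewSet`/`save_serializer_class` machinery, so the names here are illustrative) follows.

```python
from rest_framework import serializers, viewsets

from .models import Network  # assumed model whose min_ip/max_ip are derived, non-editable fields


class NetworkSerializer(serializers.ModelSerializer):
    """Read serializer: exposes every column, including the derived ones."""

    class Meta:
        model = Network
        fields = "__all__"


class NetworkSaveSerializer(serializers.ModelSerializer):
    """Write serializer: derived columns are excluded, so POST/PUT never require them."""

    class Meta:
        model = Network
        exclude = ("min_ip", "max_ip")


class NetworkViewSet(viewsets.ModelViewSet):
    queryset = Network.objects.all()
    serializer_class = NetworkSerializer

    def get_serializer_class(self):
        # Ralph's base viewset wires this up through save_serializer_class;
        # plain DRF needs the explicit override shown here.
        if self.action in ("create", "update", "partial_update"):
            return NetworkSaveSerializer
        return NetworkSerializer
```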
gh_patches_debug_24344 | rasdani/github-patches | git_diff | getnikola__nikola-1068 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nikola auto not working
I'm a newb so suspect user error, but I installed nikola v6.3.0 (pip install nikola) and have not been able to get **nikola auto** to work.
Initially I was getting errors due to a more recent version of livereload (isn't this marked as fixed?); I resolved them by pinning livereload to 2.0.0.
The next problem is that the site appears not to rebuild when I save edits to a post. I have not RTFM yet (as instructed :) but when I do:
nikola auto -b
I should be able to edit a post file (e.g. 1.rst) and on save, see the changes immediately, correct? What I see in the server log output and in Chrome dev tools shows that the browser is re-requesting a bunch of files, but I have to manually do **nikola build** to get the site to rebuild.
I'm using OS X 10.9.1, Python 2.7. Let me know if I can provide any other info; if I get it worked out I'll update the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/command/auto.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function, unicode_literals
28
29 import os
30 import subprocess
31 import webbrowser
32
33 from nikola.plugin_categories import Command
34 from nikola.utils import req_missing
35
36
37 class CommandAuto(Command):
38 """Start debugging console."""
39 name = "auto"
40 doc_purpose = "automatically detect site changes, rebuild and optionally refresh a browser"
41 cmd_options = [
42 {
43 'name': 'browser',
44 'short': 'b',
45 'type': bool,
46 'help': 'Start a web browser.',
47 'default': False,
48 },
49 {
50 'name': 'port',
51 'short': 'p',
52 'long': 'port',
53 'default': 8000,
54 'type': int,
55 'help': 'Port nummber (default: 8000)',
56 },
57 ]
58
59 def _execute(self, options, args):
60 """Start the watcher."""
61 try:
62 from livereload import Server
63 except ImportError:
64 req_missing(['livereload>=2.0.0'], 'use the "auto" command')
65 return
66
67 # Run an initial build so we are uptodate
68 subprocess.call(("nikola", "build"))
69
70 port = options and options.get('port')
71
72 server = Server()
73 server.watch('conf.py')
74 server.watch('themes/')
75 server.watch('templates/')
76 server.watch(self.site.config['GALLERY_PATH'])
77 for item in self.site.config['post_pages']:
78 server.watch(os.path.dirname(item[0]))
79 for item in self.site.config['FILES_FOLDERS']:
80 server.watch(os.path.dirname(item))
81
82 out_folder = self.site.config['OUTPUT_FOLDER']
83 if options and options.get('browser'):
84 webbrowser.open('http://localhost:{0}'.format(port))
85
86 server.serve(port, None, out_folder)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/command/auto.py b/nikola/plugins/command/auto.py
--- a/nikola/plugins/command/auto.py
+++ b/nikola/plugins/command/auto.py
@@ -64,20 +64,20 @@
req_missing(['livereload>=2.0.0'], 'use the "auto" command')
return
- # Run an initial build so we are uptodate
+ # Run an initial build so we are up-to-date
subprocess.call(("nikola", "build"))
port = options and options.get('port')
server = Server()
- server.watch('conf.py')
- server.watch('themes/')
- server.watch('templates/')
+ server.watch('conf.py', 'nikola build')
+ server.watch('themes/', 'nikola build')
+ server.watch('templates/', 'nikola build')
server.watch(self.site.config['GALLERY_PATH'])
for item in self.site.config['post_pages']:
- server.watch(os.path.dirname(item[0]))
+ server.watch(os.path.dirname(item[0]), 'nikola build')
for item in self.site.config['FILES_FOLDERS']:
- server.watch(os.path.dirname(item))
+ server.watch(os.path.dirname(item), 'nikola build')
out_folder = self.site.config['OUTPUT_FOLDER']
if options and options.get('browser'):
|
{"golden_diff": "diff --git a/nikola/plugins/command/auto.py b/nikola/plugins/command/auto.py\n--- a/nikola/plugins/command/auto.py\n+++ b/nikola/plugins/command/auto.py\n@@ -64,20 +64,20 @@\n req_missing(['livereload>=2.0.0'], 'use the \"auto\" command')\n return\n \n- # Run an initial build so we are uptodate\n+ # Run an initial build so we are up-to-date\n subprocess.call((\"nikola\", \"build\"))\n \n port = options and options.get('port')\n \n server = Server()\n- server.watch('conf.py')\n- server.watch('themes/')\n- server.watch('templates/')\n+ server.watch('conf.py', 'nikola build')\n+ server.watch('themes/', 'nikola build')\n+ server.watch('templates/', 'nikola build')\n server.watch(self.site.config['GALLERY_PATH'])\n for item in self.site.config['post_pages']:\n- server.watch(os.path.dirname(item[0]))\n+ server.watch(os.path.dirname(item[0]), 'nikola build')\n for item in self.site.config['FILES_FOLDERS']:\n- server.watch(os.path.dirname(item))\n+ server.watch(os.path.dirname(item), 'nikola build')\n \n out_folder = self.site.config['OUTPUT_FOLDER']\n if options and options.get('browser'):\n", "issue": "nikola auto not working \nI'm a newb so suspect user error, but I installed nikola v6.3.0 (pip install nikola) and have not been able to get **nikola auto** to work.\n\nInitially I was getting errors due to a more recent version of livereload (which, isn't this marked as fixed?) and resolved by forcing livereload=2.0.0. \n\nThe next problem is that the site appears not to rebuild when I save edits to a post. I have not RTFM yet (as instructed :) but when I do:\n nikola auto -b \n\nI should be able to edit a post file (e.g. 1.rst) and on save, see the changes immediately, correct? What I see in the server log output and in Chrome dev tools shows that the browser is re-requesting a bunch of files, but I have to manually do **nikola build** to get the site to rebuild. \n\nI'm using OS X 10.9.1, Python 2.7. Let me know if I can provide any other info; if I get it worked out I'll update the issue. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport subprocess\nimport webbrowser\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import req_missing\n\n\nclass CommandAuto(Command):\n \"\"\"Start debugging console.\"\"\"\n name = \"auto\"\n doc_purpose = \"automatically detect site changes, rebuild and optionally refresh a browser\"\n cmd_options = [\n {\n 'name': 'browser',\n 'short': 'b',\n 'type': bool,\n 'help': 'Start a web browser.',\n 'default': False,\n },\n {\n 'name': 'port',\n 'short': 'p',\n 'long': 'port',\n 'default': 8000,\n 'type': int,\n 'help': 'Port nummber (default: 8000)',\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Start the watcher.\"\"\"\n try:\n from livereload import Server\n except ImportError:\n req_missing(['livereload>=2.0.0'], 'use the \"auto\" command')\n return\n\n # Run an initial build so we are uptodate\n subprocess.call((\"nikola\", \"build\"))\n\n port = options and options.get('port')\n\n server = Server()\n server.watch('conf.py')\n server.watch('themes/')\n server.watch('templates/')\n server.watch(self.site.config['GALLERY_PATH'])\n for item in self.site.config['post_pages']:\n server.watch(os.path.dirname(item[0]))\n for item in self.site.config['FILES_FOLDERS']:\n server.watch(os.path.dirname(item))\n\n out_folder = self.site.config['OUTPUT_FOLDER']\n if options and options.get('browser'):\n webbrowser.open('http://localhost:{0}'.format(port))\n\n server.serve(port, None, out_folder)\n", "path": "nikola/plugins/command/auto.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport subprocess\nimport webbrowser\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import req_missing\n\n\nclass CommandAuto(Command):\n \"\"\"Start debugging console.\"\"\"\n name = \"auto\"\n doc_purpose = \"automatically detect site changes, rebuild and optionally refresh a browser\"\n cmd_options = [\n {\n 'name': 'browser',\n 'short': 'b',\n 'type': bool,\n 'help': 'Start a web browser.',\n 'default': False,\n },\n {\n 'name': 'port',\n 'short': 'p',\n 'long': 'port',\n 'default': 8000,\n 'type': int,\n 'help': 'Port nummber (default: 8000)',\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Start the watcher.\"\"\"\n try:\n from livereload import Server\n except ImportError:\n req_missing(['livereload>=2.0.0'], 'use the \"auto\" command')\n return\n\n # Run an initial build so we are up-to-date\n subprocess.call((\"nikola\", \"build\"))\n\n port = options and options.get('port')\n\n server = Server()\n server.watch('conf.py', 'nikola build')\n server.watch('themes/', 'nikola build')\n server.watch('templates/', 'nikola build')\n server.watch(self.site.config['GALLERY_PATH'])\n for item in self.site.config['post_pages']:\n server.watch(os.path.dirname(item[0]), 'nikola build')\n for item in self.site.config['FILES_FOLDERS']:\n server.watch(os.path.dirname(item), 'nikola build')\n\n out_folder = self.site.config['OUTPUT_FOLDER']\n if options and options.get('browser'):\n webbrowser.open('http://localhost:{0}'.format(port))\n\n server.serve(port, None, out_folder)\n", "path": "nikola/plugins/command/auto.py"}]}
| 1,325 | 298 |
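Editor's note: the one-line summary of the fix above is that livereload's `Server.watch(path)` only refreshes connected browsers, while `Server.watch(path, command)` also runs the command when the watched path changes — which is what makes the site actually rebuild. A minimal standalone sketch of that usage with livereload 2.x (the paths and port are placeholders) is:

```python
from livereload import Server

server = Server()

# No command: a change here only triggers a browser refresh.
server.watch("output/")

# With a shell command: livereload rebuilds first, then refreshes.
server.watch("conf.py", "nikola build")
server.watch("posts/", "nikola build")
server.watch("templates/", "nikola build")

# Serve the generated site on http://localhost:8000/
server.serve(port=8000, root="output")
```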
gh_patches_debug_24665 | rasdani/github-patches | git_diff | TabbycatDebate__tabbycat-1173 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NonExistentTimeError and AmbiguousTimeError on daylight time changes with checkin filter window
I believe this error occurs when:
- There is a change into or out of daylight saving time
- A user loads a view that displays check-in statuses
- The current time is such that the beginning of the check-in window would be during the daylight saving time transition.
For example, if the check-in window is 12 hours, then loading any page that seeks check-in information (check-ins, results entry, private URL landing) at 2:15 p.m. would cause this error, because 2:15 a.m. is either ambiguous (if daylight saving just finished) or non-existent (if it just started).
https://sentry.io/organizations/tabbycat-ig/issues/975799227/?project=185382&referrer=github_plugin
```
NonExistentTimeError: 2019-04-07 02:51:30.519504
(24 additional frame(s) were not displayed)
...
File "utils/views.py", line 127, in get_tables
return [self.get_table()]
File "availability/views.py", line 167, in get_table
self.annotate_checkins(queryset, self.tournament)
File "availability/views.py", line 232, in annotate_checkins
return get_checkins(queryset, t, 'checkin_window_people')
File "checkins/utils.py", line 124, in get_checkins
events = get_unexpired_checkins(t, window_preference_type).values(
File "checkins/utils.py", line 61, in get_unexpired_checkins
time__gte=time_window).select_related('identifier').order_by('time')
NonExistentTimeError: 2019-04-07 02:51:30.519504
```
Relates to feature originally implemented from #513.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tabbycat/checkins/utils.py`
Content:
```
1 import datetime
2 import logging
3 import random
4 import string
5
6 from django.db import IntegrityError
7 from django.core.exceptions import ObjectDoesNotExist
8 from django.utils.translation import gettext as _
9
10 from .models import DebateIdentifier, Event, PersonIdentifier, VenueIdentifier
11
12 logger = logging.getLogger(__name__)
13
14
15 def generate_identifier(length=6):
16 """Generates a random identifier and saves it to the database."""
17 chars = string.ascii_uppercase + string.digits
18 return ''.join(random.SystemRandom().choice(chars) for _ in range(length))
19
20
21 IDENTIFIER_CLASSES = {
22 'participants.Person': PersonIdentifier,
23 'draw.Debate': DebateIdentifier,
24 'venues.Venue': VenueIdentifier,
25 }
26
27
28 def generate_identifiers(queryset, length=6, num_attempts=10):
29 """Generates identifiers for every instance in the given QuerySet."""
30 klass = IDENTIFIER_CLASSES[queryset.model._meta.label]
31 attr = klass.instance_attr
32
33 for instance in queryset:
34 identifier = generate_identifier(length=length)
35 for i in range(num_attempts):
36 try:
37 klass.objects.create(identifier=identifier, **{attr: instance})
38 except IntegrityError:
39 logger.warning("Identifier was not unique, trying again (%d of %d)", i, num_attempts)
40 continue
41 else:
42 break
43 else:
44 logger.error("Could not generate unique identifier for %r after %d tries", instance, num_attempts)
45
46
47 def delete_identifiers(queryset):
48 klass = IDENTIFIER_CLASSES[queryset.model._meta.label]
49 attr = klass.instance_attr
50 klass.objects.filter(**{attr + '__in': queryset}).delete()
51
52
53 def get_unexpired_checkins(tournament, window_preference_type):
54 if not window_preference_type:
55 time_window = datetime.datetime.fromtimestamp(0) # Unix start
56 else:
57 start = datetime.timedelta(hours=tournament.pref(window_preference_type))
58 time_window = datetime.datetime.now() - start
59
60 events = Event.objects.filter(tournament=tournament,
61 time__gte=time_window).select_related('identifier').order_by('time')
62 return events
63
64
65 def create_identifiers(model_to_make, items_to_check):
66 kind = model_to_make.instance_attr
67 identifiers_to_make = items_to_check.filter(checkin_identifier__isnull=True)
68
69 for item in identifiers_to_make:
70 model_to_make.objects.create(**{kind: item})
71
72 return
73
74
75 def single_checkin(instance, events):
76 instance.checked_icon = ''
77 instance.checked_in = False
78 try:
79 identifier = instance.checkin_identifier
80 instance.barcode = identifier.barcode
81 instance.checked_tooltip = _("Not checked in (barcode %(barcode)s)") % {'barcode': identifier.barcode}
82 except ObjectDoesNotExist:
83 identifier = None
84 instance.barcode = None
85 instance.checked_tooltip = _("Not checked in; no barcode assigned")
86
87 if identifier:
88 instance.time = next((e['time'] for e in events if e['identifier__barcode'] == identifier.barcode), None)
89 if instance.time:
90 instance.checked_in = True
91 instance.checked_icon = 'check'
92 instance.checked_tooltip = _("checked in at %(time)s") % {'time': instance.time.strftime('%H:%M')}
93 return instance
94
95
96 def multi_checkin(team, events, t):
97 team.checked_icon = ''
98 team.checked_in = False
99 tooltips = []
100
101 for speaker in team.speaker_set.all():
102 speaker = single_checkin(speaker, events)
103 if speaker.checked_in:
104 tooltip = _("%(speaker)s checked in at %(time)s.") % {'speaker': speaker.name, 'time': speaker.time.strftime('%H:%M')}
105 else:
106 tooltip = _("%(speaker)s is missing.") % {'speaker': speaker.name}
107 tooltips.append(tooltip)
108
109 team.checked_tooltip = " ".join(tooltips)
110
111 check_ins = sum(s.checked_in for s in team.speaker_set.all())
112 nsubstantives = t.pref('substantive_speakers')
113 if check_ins >= nsubstantives:
114 team.checked_in = True
115 team.checked_icon = 'check'
116 elif check_ins == nsubstantives - 1:
117 team.checked_in = True
118 team.checked_icon = 'shuffle'
119
120 return team
121
122
123 def get_checkins(queryset, t, window_preference_type):
124 events = get_unexpired_checkins(t, window_preference_type).values(
125 'time', 'identifier__barcode')
126 for instance in queryset:
127 if hasattr(instance, 'use_institution_prefix'):
128 instance = multi_checkin(instance, events, t)
129 else:
130 instance = single_checkin(instance, events)
131
132 return queryset
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tabbycat/checkins/utils.py b/tabbycat/checkins/utils.py
--- a/tabbycat/checkins/utils.py
+++ b/tabbycat/checkins/utils.py
@@ -4,7 +4,9 @@
import string
from django.db import IntegrityError
+from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
+from django.utils import timezone
from django.utils.translation import gettext as _
from .models import DebateIdentifier, Event, PersonIdentifier, VenueIdentifier
@@ -51,15 +53,13 @@
def get_unexpired_checkins(tournament, window_preference_type):
- if not window_preference_type:
- time_window = datetime.datetime.fromtimestamp(0) # Unix start
- else:
+ filters = Q(tournament=tournament)
+ if window_preference_type:
start = datetime.timedelta(hours=tournament.pref(window_preference_type))
- time_window = datetime.datetime.now() - start
+ time_window = timezone.now() - start
+ filters &= Q(time_gte=time_window)
- events = Event.objects.filter(tournament=tournament,
- time__gte=time_window).select_related('identifier').order_by('time')
- return events
+ return Event.objects.filter(filters).select_related('identifier').order_by('time')
def create_identifiers(model_to_make, items_to_check):
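Editor's note: the traceback in the record above comes from comparing against a naive `datetime.datetime.now()` while timezone support is enabled, so the backend has to localize a wall-clock value that may not exist (or may be ambiguous) around a DST switch. The snippet below reproduces that failure mode directly with pytz and shows why an aware `now()` — what `django.utils.timezone.now()` returns — sidesteps it; it is an illustration, not Tabbycat code.

```python
import datetime

import pytz

eastern = pytz.timezone("US/Eastern")

# 2:30 a.m. on 2019-03-10 never happened in US/Eastern (clocks jumped 2:00 -> 3:00),
# so strict localization of the naive value fails, just like the Sentry traceback.
naive = datetime.datetime(2019, 3, 10, 2, 30)
try:
    eastern.localize(naive, is_dst=None)
except pytz.exceptions.NonExistentTimeError as exc:
    print("cannot localize naive value:", exc)

# An aware "now" is unambiguous regardless of local DST transitions, which is
# why the patch swaps datetime.datetime.now() for django.utils.timezone.now().
aware_now = datetime.datetime.now(tz=datetime.timezone.utc)
window_start = aware_now - datetime.timedelta(hours=12)
print("check-ins no older than", window_start.isoformat())
```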
|
{"golden_diff": "diff --git a/tabbycat/checkins/utils.py b/tabbycat/checkins/utils.py\n--- a/tabbycat/checkins/utils.py\n+++ b/tabbycat/checkins/utils.py\n@@ -4,7 +4,9 @@\n import string\n \n from django.db import IntegrityError\n+from django.db.models import Q\n from django.core.exceptions import ObjectDoesNotExist\n+from django.utils import timezone\n from django.utils.translation import gettext as _\n \n from .models import DebateIdentifier, Event, PersonIdentifier, VenueIdentifier\n@@ -51,15 +53,13 @@\n \n \n def get_unexpired_checkins(tournament, window_preference_type):\n- if not window_preference_type:\n- time_window = datetime.datetime.fromtimestamp(0) # Unix start\n- else:\n+ filters = Q(tournament=tournament)\n+ if window_preference_type:\n start = datetime.timedelta(hours=tournament.pref(window_preference_type))\n- time_window = datetime.datetime.now() - start\n+ time_window = timezone.now() - start\n+ filters &= Q(time_gte=time_window)\n \n- events = Event.objects.filter(tournament=tournament,\n- time__gte=time_window).select_related('identifier').order_by('time')\n- return events\n+ return Event.objects.filter(filters).select_related('identifier').order_by('time')\n \n \n def create_identifiers(model_to_make, items_to_check):\n", "issue": "NonExistentTimeError and AmbiguousTimeError on daylight time changes with checkin filter window\nI believe this error occurs when:\n- There is a change into or out of daylight saving time\n- A user loads a view that displays check-in statuses\n- The current time is such that the beginning of the check-in window would be during the daylight saving time transition.\n\nFor example, if the check-in window is 12 hours, then loading any page that seeks check-in information (check-ins, results entry, private URL landing) at 2:15 p.m. would cause this error, because 2:15 a.m. 
is either ambiguous (if daylight saving just finished) or non-existent (if it just started).\n\nhttps://sentry.io/organizations/tabbycat-ig/issues/975799227/?project=185382&referrer=github_plugin\n\n```\nNonExistentTimeError: 2019-04-07 02:51:30.519504\n(24 additional frame(s) were not displayed)\n...\n File \"utils/views.py\", line 127, in get_tables\n return [self.get_table()]\n File \"availability/views.py\", line 167, in get_table\n self.annotate_checkins(queryset, self.tournament)\n File \"availability/views.py\", line 232, in annotate_checkins\n return get_checkins(queryset, t, 'checkin_window_people')\n File \"checkins/utils.py\", line 124, in get_checkins\n events = get_unexpired_checkins(t, window_preference_type).values(\n File \"checkins/utils.py\", line 61, in get_unexpired_checkins\n time__gte=time_window).select_related('identifier').order_by('time')\n\nNonExistentTimeError: 2019-04-07 02:51:30.519504\n```\n\nRelates to feature originally implemented from #513.\n", "before_files": [{"content": "import datetime\nimport logging\nimport random\nimport string\n\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.translation import gettext as _\n\nfrom .models import DebateIdentifier, Event, PersonIdentifier, VenueIdentifier\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_identifier(length=6):\n \"\"\"Generates a random identifier and saves it to the database.\"\"\"\n chars = string.ascii_uppercase + string.digits\n return ''.join(random.SystemRandom().choice(chars) for _ in range(length))\n\n\nIDENTIFIER_CLASSES = {\n 'participants.Person': PersonIdentifier,\n 'draw.Debate': DebateIdentifier,\n 'venues.Venue': VenueIdentifier,\n}\n\n\ndef generate_identifiers(queryset, length=6, num_attempts=10):\n \"\"\"Generates identifiers for every instance in the given QuerySet.\"\"\"\n klass = IDENTIFIER_CLASSES[queryset.model._meta.label]\n attr = klass.instance_attr\n\n for instance in queryset:\n identifier = generate_identifier(length=length)\n for i in range(num_attempts):\n try:\n klass.objects.create(identifier=identifier, **{attr: instance})\n except IntegrityError:\n logger.warning(\"Identifier was not unique, trying again (%d of %d)\", i, num_attempts)\n continue\n else:\n break\n else:\n logger.error(\"Could not generate unique identifier for %r after %d tries\", instance, num_attempts)\n\n\ndef delete_identifiers(queryset):\n klass = IDENTIFIER_CLASSES[queryset.model._meta.label]\n attr = klass.instance_attr\n klass.objects.filter(**{attr + '__in': queryset}).delete()\n\n\ndef get_unexpired_checkins(tournament, window_preference_type):\n if not window_preference_type:\n time_window = datetime.datetime.fromtimestamp(0) # Unix start\n else:\n start = datetime.timedelta(hours=tournament.pref(window_preference_type))\n time_window = datetime.datetime.now() - start\n\n events = Event.objects.filter(tournament=tournament,\n time__gte=time_window).select_related('identifier').order_by('time')\n return events\n\n\ndef create_identifiers(model_to_make, items_to_check):\n kind = model_to_make.instance_attr\n identifiers_to_make = items_to_check.filter(checkin_identifier__isnull=True)\n\n for item in identifiers_to_make:\n model_to_make.objects.create(**{kind: item})\n\n return\n\n\ndef single_checkin(instance, events):\n instance.checked_icon = ''\n instance.checked_in = False\n try:\n identifier = instance.checkin_identifier\n instance.barcode = identifier.barcode\n instance.checked_tooltip = _(\"Not checked in 
(barcode %(barcode)s)\") % {'barcode': identifier.barcode}\n except ObjectDoesNotExist:\n identifier = None\n instance.barcode = None\n instance.checked_tooltip = _(\"Not checked in; no barcode assigned\")\n\n if identifier:\n instance.time = next((e['time'] for e in events if e['identifier__barcode'] == identifier.barcode), None)\n if instance.time:\n instance.checked_in = True\n instance.checked_icon = 'check'\n instance.checked_tooltip = _(\"checked in at %(time)s\") % {'time': instance.time.strftime('%H:%M')}\n return instance\n\n\ndef multi_checkin(team, events, t):\n team.checked_icon = ''\n team.checked_in = False\n tooltips = []\n\n for speaker in team.speaker_set.all():\n speaker = single_checkin(speaker, events)\n if speaker.checked_in:\n tooltip = _(\"%(speaker)s checked in at %(time)s.\") % {'speaker': speaker.name, 'time': speaker.time.strftime('%H:%M')}\n else:\n tooltip = _(\"%(speaker)s is missing.\") % {'speaker': speaker.name}\n tooltips.append(tooltip)\n\n team.checked_tooltip = \" \".join(tooltips)\n\n check_ins = sum(s.checked_in for s in team.speaker_set.all())\n nsubstantives = t.pref('substantive_speakers')\n if check_ins >= nsubstantives:\n team.checked_in = True\n team.checked_icon = 'check'\n elif check_ins == nsubstantives - 1:\n team.checked_in = True\n team.checked_icon = 'shuffle'\n\n return team\n\n\ndef get_checkins(queryset, t, window_preference_type):\n events = get_unexpired_checkins(t, window_preference_type).values(\n 'time', 'identifier__barcode')\n for instance in queryset:\n if hasattr(instance, 'use_institution_prefix'):\n instance = multi_checkin(instance, events, t)\n else:\n instance = single_checkin(instance, events)\n\n return queryset\n", "path": "tabbycat/checkins/utils.py"}], "after_files": [{"content": "import datetime\nimport logging\nimport random\nimport string\n\nfrom django.db import IntegrityError\nfrom django.db.models import Q\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\n\nfrom .models import DebateIdentifier, Event, PersonIdentifier, VenueIdentifier\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_identifier(length=6):\n \"\"\"Generates a random identifier and saves it to the database.\"\"\"\n chars = string.ascii_uppercase + string.digits\n return ''.join(random.SystemRandom().choice(chars) for _ in range(length))\n\n\nIDENTIFIER_CLASSES = {\n 'participants.Person': PersonIdentifier,\n 'draw.Debate': DebateIdentifier,\n 'venues.Venue': VenueIdentifier,\n}\n\n\ndef generate_identifiers(queryset, length=6, num_attempts=10):\n \"\"\"Generates identifiers for every instance in the given QuerySet.\"\"\"\n klass = IDENTIFIER_CLASSES[queryset.model._meta.label]\n attr = klass.instance_attr\n\n for instance in queryset:\n identifier = generate_identifier(length=length)\n for i in range(num_attempts):\n try:\n klass.objects.create(identifier=identifier, **{attr: instance})\n except IntegrityError:\n logger.warning(\"Identifier was not unique, trying again (%d of %d)\", i, num_attempts)\n continue\n else:\n break\n else:\n logger.error(\"Could not generate unique identifier for %r after %d tries\", instance, num_attempts)\n\n\ndef delete_identifiers(queryset):\n klass = IDENTIFIER_CLASSES[queryset.model._meta.label]\n attr = klass.instance_attr\n klass.objects.filter(**{attr + '__in': queryset}).delete()\n\n\ndef get_unexpired_checkins(tournament, window_preference_type):\n filters = Q(tournament=tournament)\n if 
window_preference_type:\n start = datetime.timedelta(hours=tournament.pref(window_preference_type))\n time_window = timezone.now() - start\n filters &= Q(time_gte=time_window)\n\n return Event.objects.filter(filters).select_related('identifier').order_by('time')\n\n\ndef create_identifiers(model_to_make, items_to_check):\n kind = model_to_make.instance_attr\n identifiers_to_make = items_to_check.filter(checkin_identifier__isnull=True)\n\n for item in identifiers_to_make:\n model_to_make.objects.create(**{kind: item})\n\n return\n\n\ndef single_checkin(instance, events):\n instance.checked_icon = ''\n instance.checked_in = False\n try:\n identifier = instance.checkin_identifier\n instance.barcode = identifier.barcode\n instance.checked_tooltip = _(\"Not checked in (barcode %(barcode)s)\") % {'barcode': identifier.barcode}\n except ObjectDoesNotExist:\n identifier = None\n instance.barcode = None\n instance.checked_tooltip = _(\"Not checked in; no barcode assigned\")\n\n if identifier:\n instance.time = next((e['time'] for e in events if e['identifier__barcode'] == identifier.barcode), None)\n if instance.time:\n instance.checked_in = True\n instance.checked_icon = 'check'\n instance.checked_tooltip = _(\"checked in at %(time)s\") % {'time': instance.time.strftime('%H:%M')}\n return instance\n\n\ndef multi_checkin(team, events, t):\n team.checked_icon = ''\n team.checked_in = False\n tooltips = []\n\n for speaker in team.speaker_set.all():\n speaker = single_checkin(speaker, events)\n if speaker.checked_in:\n tooltip = _(\"%(speaker)s checked in at %(time)s.\") % {'speaker': speaker.name, 'time': speaker.time.strftime('%H:%M')}\n else:\n tooltip = _(\"%(speaker)s is missing.\") % {'speaker': speaker.name}\n tooltips.append(tooltip)\n\n team.checked_tooltip = \" \".join(tooltips)\n\n check_ins = sum(s.checked_in for s in team.speaker_set.all())\n nsubstantives = t.pref('substantive_speakers')\n if check_ins >= nsubstantives:\n team.checked_in = True\n team.checked_icon = 'check'\n elif check_ins == nsubstantives - 1:\n team.checked_in = True\n team.checked_icon = 'shuffle'\n\n return team\n\n\ndef get_checkins(queryset, t, window_preference_type):\n events = get_unexpired_checkins(t, window_preference_type).values(\n 'time', 'identifier__barcode')\n for instance in queryset:\n if hasattr(instance, 'use_institution_prefix'):\n instance = multi_checkin(instance, events, t)\n else:\n instance = single_checkin(instance, events)\n\n return queryset\n", "path": "tabbycat/checkins/utils.py"}]}
| 1,997 | 296 |
gh_patches_debug_39039
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1392
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
certwatcher.py is not using TLS_CERT_FILENAME nor TLS_KEYPAIR_FILENAME
certwatcher.py assumes that the certificate and the key are in /certs and named cert.pem and key.pem respectively.
However, Mailu offers two environment variables to allow a specific path & filename for the certificate and the key, which are used in config.py:
TLS_CERT_FILENAME
TLS_KEYPAIR_FILENAME
I would add that in my use case (https-portal as a reverse proxy), those files are not directly in /certs. My environment variables look like:
TLS_CERT_FILENAME=mail.example.net/production/signed.crt
TLS_KEYPAIR_FILENAME=mail.example.net/production/domain.key
Currently, certwatcher.py is monitoring `/certs`, whereas I would need to monitor
`/certs/mail.example.net/production`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/nginx/certwatcher.py`
Content:
```
1 #!/usr/bin/python3
2 """
3 Certificate watcher which reloads nginx or reconfigures it, depending on what
4 happens to externally supplied certificates. Only executed by start.py in case
5 of TLS_FLAVOR=[mail, cert]
6 """
7
8 from os.path import exists, split as path_split
9 from os import system
10 import time
11 from watchdog.observers.polling import PollingObserver
12 from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
13 FileCreatedEvent, FileModifiedEvent, FileMovedEvent
14
15 class ChangeHandler(FileSystemEventHandler):
16 "watchdog-handler listening on any event, executing the correct configuration/reload steps"
17 @staticmethod
18 def reload_nginx():
19 "merely reload nginx without re-configuring everything"
20 if exists("/var/run/nginx.pid"):
21 print("Reloading a running nginx")
22 system("nginx -s reload")
23
24 @staticmethod
25 def reexec_config():
26 "execute a reconfiguration of the system, which also reloads"
27 print("Reconfiguring system")
28 system("/config.py")
29
30 def on_any_event(self, event):
31 "event-listener checking if the affected files are the cert-files we're interested in"
32 if event.is_directory:
33 return
34
35 filename = path_split(event.src_path)[-1]
36 if isinstance(event, FileMovedEvent):
37 filename = path_split(event.dest_path)[-1]
38
39 if filename in ['cert.pem', 'key.pem']:
40 # all cases except for FileModified need re-configure
41 if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
42 ChangeHandler.reexec_config()
43 # file modification needs only a nginx reload without config.py
44 elif isinstance(event, FileModifiedEvent):
45 ChangeHandler.reload_nginx()
46 # cert files have been moved away, re-configure
47 elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
48 ChangeHandler.reexec_config()
49
50
51 if __name__ == '__main__':
52 observer = PollingObserver()
53 handler = ChangeHandler()
54 observer.schedule(handler, "/certs", recursive=False)
55 observer.start()
56
57 try:
58 while True:
59 time.sleep(1)
60 except KeyboardInterrupt:
61 observer.stop()
62
63 observer.join()
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py
--- a/core/nginx/certwatcher.py
+++ b/core/nginx/certwatcher.py
@@ -5,8 +5,8 @@
of TLS_FLAVOR=[mail, cert]
"""
-from os.path import exists, split as path_split
-from os import system
+from os.path import exists, split as path_split, join as path_join
+from os import system, getenv
import time
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
@@ -14,6 +14,13 @@
class ChangeHandler(FileSystemEventHandler):
"watchdog-handler listening on any event, executing the correct configuration/reload steps"
+
+ def __init__(self, cert_path, keypair_path):
+ "Initialize a new changehandler"""
+ super().__init__()
+ self.cert_path = cert_path
+ self.keypair_path = keypair_path
+
@staticmethod
def reload_nginx():
"merely reload nginx without re-configuring everything"
@@ -32,11 +39,11 @@
if event.is_directory:
return
- filename = path_split(event.src_path)[-1]
+ filename = event.src_path
if isinstance(event, FileMovedEvent):
- filename = path_split(event.dest_path)[-1]
+ filename = event.dest_path
- if filename in ['cert.pem', 'key.pem']:
+ if filename in [self.cert_path, self.keypair_path]:
# all cases except for FileModified need re-configure
if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
ChangeHandler.reexec_config()
@@ -44,14 +51,21 @@
elif isinstance(event, FileModifiedEvent):
ChangeHandler.reload_nginx()
# cert files have been moved away, re-configure
- elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
+ elif isinstance(event, FileMovedEvent) and event.src_path in [self.cert_path, self.keypair_path]:
ChangeHandler.reexec_config()
if __name__ == '__main__':
+ cert_path = path_join("/certs/", getenv("TLS_CERT_FILENAME", default="cert.pem"))
+ cert_dir = path_split(cert_path)[0]
+ keypair_path = path_join("/certs/", getenv("TLS_KEYPAIR_FILENAME", default="key.pem"))
+ keypair_dir = path_split(keypair_path)[0]
+
observer = PollingObserver()
- handler = ChangeHandler()
- observer.schedule(handler, "/certs", recursive=False)
+ handler = ChangeHandler(cert_path, keypair_path)
+ observer.schedule(handler, cert_dir, recursive=False)
+ if keypair_dir != cert_dir:
+ observer.schedule(handler, keypair_dir, recursive=False)
observer.start()
try:
|
{"golden_diff": "diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py\n--- a/core/nginx/certwatcher.py\n+++ b/core/nginx/certwatcher.py\n@@ -5,8 +5,8 @@\n of TLS_FLAVOR=[mail, cert]\n \"\"\"\n \n-from os.path import exists, split as path_split\n-from os import system\n+from os.path import exists, split as path_split, join as path_join\n+from os import system, getenv\n import time\n from watchdog.observers.polling import PollingObserver\n from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n@@ -14,6 +14,13 @@\n \n class ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n+\n+ def __init__(self, cert_path, keypair_path):\n+ \"Initialize a new changehandler\"\"\"\n+ super().__init__()\n+ self.cert_path = cert_path\n+ self.keypair_path = keypair_path\n+\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n@@ -32,11 +39,11 @@\n if event.is_directory:\n return\n \n- filename = path_split(event.src_path)[-1]\n+ filename = event.src_path\n if isinstance(event, FileMovedEvent):\n- filename = path_split(event.dest_path)[-1]\n+ filename = event.dest_path\n \n- if filename in ['cert.pem', 'key.pem']:\n+ if filename in [self.cert_path, self.keypair_path]:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n@@ -44,14 +51,21 @@\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n- elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:\n+ elif isinstance(event, FileMovedEvent) and event.src_path in [self.cert_path, self.keypair_path]:\n ChangeHandler.reexec_config()\n \n \n if __name__ == '__main__':\n+ cert_path = path_join(\"/certs/\", getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\"))\n+ cert_dir = path_split(cert_path)[0]\n+ keypair_path = path_join(\"/certs/\", getenv(\"TLS_KEYPAIR_FILENAME\", default=\"key.pem\"))\n+ keypair_dir = path_split(keypair_path)[0]\n+\n observer = PollingObserver()\n- handler = ChangeHandler()\n- observer.schedule(handler, \"/certs\", recursive=False)\n+ handler = ChangeHandler(cert_path, keypair_path)\n+ observer.schedule(handler, cert_dir, recursive=False)\n+ if keypair_dir != cert_dir:\n+ observer.schedule(handler, keypair_dir, recursive=False)\n observer.start()\n \n try:\n", "issue": "certwatcher.py is not using TLS_CERT_FILENAME nor TLS_KEYPAIR_FILENAME\ncertwatcher.py is assuming that the certificate and the key are in /certs and named respectively cert.pem and key.pem\r\n\r\nHowever, Mailu offer two environment variables to allow specific path & filename for the certificate and the key which are used in config.py:\r\nTLS_CERT_FILENAME\r\nTLS_KEYPAIR_FILENAME\r\n\r\nI would add that in my use case (https-portal as a reverse proxy), those files are not directly in /certs. My environment variables look like:\r\nTLS_CERT_FILENAME=mail.example.net/production/signed.crt\r\nTLS_KEYPAIR_FILENAME=mail.example.net/production/domain.key\r\nCurrently, certwatcher.py is monitoring `/certs`, whereas I would need to monitor \r\n`/certs/mail.example.net/production`\n", "before_files": [{"content": "#!/usr/bin/python3\n\"\"\"\nCertificate watcher which reloads nginx or reconfigures it, depending on what\nhappens to externally supplied certificates. 
Only executed by start.py in case\nof TLS_FLAVOR=[mail, cert]\n\"\"\"\n\nfrom os.path import exists, split as path_split\nfrom os import system\nimport time\nfrom watchdog.observers.polling import PollingObserver\nfrom watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n FileCreatedEvent, FileModifiedEvent, FileMovedEvent\n\nclass ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n if exists(\"/var/run/nginx.pid\"):\n print(\"Reloading a running nginx\")\n system(\"nginx -s reload\")\n\n @staticmethod\n def reexec_config():\n \"execute a reconfiguration of the system, which also reloads\"\n print(\"Reconfiguring system\")\n system(\"/config.py\")\n\n def on_any_event(self, event):\n \"event-listener checking if the affected files are the cert-files we're interested in\"\n if event.is_directory:\n return\n\n filename = path_split(event.src_path)[-1]\n if isinstance(event, FileMovedEvent):\n filename = path_split(event.dest_path)[-1]\n\n if filename in ['cert.pem', 'key.pem']:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n # file modification needs only a nginx reload without config.py\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:\n ChangeHandler.reexec_config()\n\n\nif __name__ == '__main__':\n observer = PollingObserver()\n handler = ChangeHandler()\n observer.schedule(handler, \"/certs\", recursive=False)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n", "path": "core/nginx/certwatcher.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\"\"\"\nCertificate watcher which reloads nginx or reconfigures it, depending on what\nhappens to externally supplied certificates. 
Only executed by start.py in case\nof TLS_FLAVOR=[mail, cert]\n\"\"\"\n\nfrom os.path import exists, split as path_split, join as path_join\nfrom os import system, getenv\nimport time\nfrom watchdog.observers.polling import PollingObserver\nfrom watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n FileCreatedEvent, FileModifiedEvent, FileMovedEvent\n\nclass ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n\n def __init__(self, cert_path, keypair_path):\n \"Initialize a new changehandler\"\"\"\n super().__init__()\n self.cert_path = cert_path\n self.keypair_path = keypair_path\n\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n if exists(\"/var/run/nginx.pid\"):\n print(\"Reloading a running nginx\")\n system(\"nginx -s reload\")\n\n @staticmethod\n def reexec_config():\n \"execute a reconfiguration of the system, which also reloads\"\n print(\"Reconfiguring system\")\n system(\"/config.py\")\n\n def on_any_event(self, event):\n \"event-listener checking if the affected files are the cert-files we're interested in\"\n if event.is_directory:\n return\n\n filename = event.src_path\n if isinstance(event, FileMovedEvent):\n filename = event.dest_path\n\n if filename in [self.cert_path, self.keypair_path]:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n # file modification needs only a nginx reload without config.py\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n elif isinstance(event, FileMovedEvent) and event.src_path in [self.cert_path, self.keypair_path]:\n ChangeHandler.reexec_config()\n\n\nif __name__ == '__main__':\n cert_path = path_join(\"/certs/\", getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\"))\n cert_dir = path_split(cert_path)[0]\n keypair_path = path_join(\"/certs/\", getenv(\"TLS_KEYPAIR_FILENAME\", default=\"key.pem\"))\n keypair_dir = path_split(keypair_path)[0]\n\n observer = PollingObserver()\n handler = ChangeHandler(cert_path, keypair_path)\n observer.schedule(handler, cert_dir, recursive=False)\n if keypair_dir != cert_dir:\n observer.schedule(handler, keypair_dir, recursive=False)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n", "path": "core/nginx/certwatcher.py"}]}
| 1,028 | 647 |
gh_patches_debug_5433
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmocr-221
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the default ignored class -100 in SDMGRLoss
In most cases, class 0 should not be ignored.
ref https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/kie/losses/sdmgr_loss.py#L17
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmocr/models/kie/losses/sdmgr_loss.py`
Content:
```
1 import torch
2 from torch import nn
3
4 from mmdet.models.builder import LOSSES
5 from mmdet.models.losses import accuracy
6
7
8 @LOSSES.register_module()
9 class SDMGRLoss(nn.Module):
10 """The implementation the loss of key information extraction proposed in
11 the paper: Spatial Dual-Modality Graph Reasoning for Key Information
12 Extraction.
13
14 https://arxiv.org/abs/2103.14470.
15 """
16
17 def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):
18 super().__init__()
19 self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)
20 self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)
21 self.node_weight = node_weight
22 self.edge_weight = edge_weight
23 self.ignore = ignore
24
25 def forward(self, node_preds, edge_preds, gts):
26 node_gts, edge_gts = [], []
27 for gt in gts:
28 node_gts.append(gt[:, 0])
29 edge_gts.append(gt[:, 1:].contiguous().view(-1))
30 node_gts = torch.cat(node_gts).long()
31 edge_gts = torch.cat(edge_gts).long()
32
33 node_valids = torch.nonzero(node_gts != self.ignore).view(-1)
34 edge_valids = torch.nonzero(edge_gts != -1).view(-1)
35 return dict(
36 loss_node=self.node_weight * self.loss_node(node_preds, node_gts),
37 loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts),
38 acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]),
39 acc_edge=accuracy(edge_preds[edge_valids], edge_gts[edge_valids]))
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmocr/models/kie/losses/sdmgr_loss.py b/mmocr/models/kie/losses/sdmgr_loss.py
--- a/mmocr/models/kie/losses/sdmgr_loss.py
+++ b/mmocr/models/kie/losses/sdmgr_loss.py
@@ -14,7 +14,7 @@
https://arxiv.org/abs/2103.14470.
"""
- def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):
+ def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100):
super().__init__()
self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)
self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)
|
{"golden_diff": "diff --git a/mmocr/models/kie/losses/sdmgr_loss.py b/mmocr/models/kie/losses/sdmgr_loss.py\n--- a/mmocr/models/kie/losses/sdmgr_loss.py\n+++ b/mmocr/models/kie/losses/sdmgr_loss.py\n@@ -14,7 +14,7 @@\n https://arxiv.org/abs/2103.14470.\n \"\"\"\n \n- def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):\n+ def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100):\n super().__init__()\n self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)\n self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)\n", "issue": "Make default ingnored class to -100 in SDMGRLoss\nIn most cases, class 0 should not be ignored.\r\n\r\nref https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/kie/losses/sdmgr_loss.py#L17\n", "before_files": [{"content": "import torch\nfrom torch import nn\n\nfrom mmdet.models.builder import LOSSES\nfrom mmdet.models.losses import accuracy\n\n\[email protected]_module()\nclass SDMGRLoss(nn.Module):\n \"\"\"The implementation the loss of key information extraction proposed in\n the paper: Spatial Dual-Modality Graph Reasoning for Key Information\n Extraction.\n\n https://arxiv.org/abs/2103.14470.\n \"\"\"\n\n def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):\n super().__init__()\n self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)\n self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)\n self.node_weight = node_weight\n self.edge_weight = edge_weight\n self.ignore = ignore\n\n def forward(self, node_preds, edge_preds, gts):\n node_gts, edge_gts = [], []\n for gt in gts:\n node_gts.append(gt[:, 0])\n edge_gts.append(gt[:, 1:].contiguous().view(-1))\n node_gts = torch.cat(node_gts).long()\n edge_gts = torch.cat(edge_gts).long()\n\n node_valids = torch.nonzero(node_gts != self.ignore).view(-1)\n edge_valids = torch.nonzero(edge_gts != -1).view(-1)\n return dict(\n loss_node=self.node_weight * self.loss_node(node_preds, node_gts),\n loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts),\n acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]),\n acc_edge=accuracy(edge_preds[edge_valids], edge_gts[edge_valids]))\n", "path": "mmocr/models/kie/losses/sdmgr_loss.py"}], "after_files": [{"content": "import torch\nfrom torch import nn\n\nfrom mmdet.models.builder import LOSSES\nfrom mmdet.models.losses import accuracy\n\n\[email protected]_module()\nclass SDMGRLoss(nn.Module):\n \"\"\"The implementation the loss of key information extraction proposed in\n the paper: Spatial Dual-Modality Graph Reasoning for Key Information\n Extraction.\n\n https://arxiv.org/abs/2103.14470.\n \"\"\"\n\n def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100):\n super().__init__()\n self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)\n self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)\n self.node_weight = node_weight\n self.edge_weight = edge_weight\n self.ignore = ignore\n\n def forward(self, node_preds, edge_preds, gts):\n node_gts, edge_gts = [], []\n for gt in gts:\n node_gts.append(gt[:, 0])\n edge_gts.append(gt[:, 1:].contiguous().view(-1))\n node_gts = torch.cat(node_gts).long()\n edge_gts = torch.cat(edge_gts).long()\n\n node_valids = torch.nonzero(node_gts != self.ignore).view(-1)\n edge_valids = torch.nonzero(edge_gts != -1).view(-1)\n return dict(\n loss_node=self.node_weight * self.loss_node(node_preds, node_gts),\n loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts),\n acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]),\n acc_edge=accuracy(edge_preds[edge_valids], 
edge_gts[edge_valids]))\n", "path": "mmocr/models/kie/losses/sdmgr_loss.py"}]}
| 778 | 186 |
gh_patches_debug_525
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-592
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix env substitution in docker-compose.yml for docker-rm
https://ci.fidals.com/fidals/shopelectro/938/12
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/management/commands/excel.py`
Content:
```
1 """
2 Generate Excel price-list.
3
4 Use this excel editor lib: https://openpyxl.readthedocs.io/en/default/
5 """
6 import datetime
7 import os
8 from collections import namedtuple
9
10 import openpyxl
11 from django.conf import settings
12 from django.core.management.base import BaseCommand
13 from openpyxl.styles import borders, colors, Font
14
15 from shopelectro.models import Product, Category
16
17
18 class Command(BaseCommand):
19 TEMPLATE = 'templates/ecommerce/template.xlsx'
20 NAME = 'pricelist.xlsx'
21 SHEET_TITLE = 'Прайс Shopelectro'
22 CATEGORY_FILL = openpyxl.styles.PatternFill(
23 start_color='F4FEFD',
24 end_color='F4FEFD',
25 fill_type='solid'
26 )
27 BUY_FILL = openpyxl.styles.PatternFill(
28 start_color='FEFEF0',
29 end_color='FEFEF0',
30 fill_type='solid'
31 )
32 THIN_BORDER = borders.Border(
33 top=borders.Side(style='thin'),
34 right=borders.Side(style='thin'),
35 bottom=borders.Side(style='thin'),
36 left=borders.Side(style='thin')
37 )
38 CURRENT_ROW = '9' # Start of catalog section in file.
39 cell = namedtuple('cell', ['row', 'col'])
40 BAD_STYLED_CELLS = ['D5', 'E5', 'D6', 'G8']
41
42 def __init__(self, *args, **kwargs):
43 super(Command, self).__init__(*args, **kwargs)
44 self.file, self.sheet = self.load_file_and_sheet()
45
46 def handle(self, *args, **options):
47 """Open template's file and start proceeding it."""
48 self.set_collapse_controls()
49 self.fill_header()
50 self.write_catalog()
51 self.hide_formulas()
52 self.set_styles()
53 base_dir = settings.ASSETS_DIR
54 self.file.save(os.path.join(base_dir, self.NAME))
55
56 def set_styles(self):
57 for cell in self.BAD_STYLED_CELLS:
58 self.sheet[cell].border = self.THIN_BORDER
59
60 def set_collapse_controls(self):
61 """
62 Place collapse buttons above rows.
63
64 Collapse controls looks like this: http://prntscr.com/clf9xh. # Ignore InvalidLinkBear
65 Doc link: https://goo.gl/nR5pLO
66 """
67 self.sheet.sheet_properties.outlinePr.summaryBelow = False
68
69 def increase_row(self):
70 self.CURRENT_ROW = str(int(self.CURRENT_ROW) + 1)
71 return self.CURRENT_ROW
72
73 def get_row(self, row_number):
74 return self.sheet.row_dimensions[int(row_number)]
75
76 def load_file_and_sheet(self):
77 """
78 Load template file into openpyxl.
79
80 Return tuple with opened openpyxl file's object and active price sheet.
81 """
82 file = openpyxl.load_workbook(os.path.join(
83 settings.BASE_DIR, self.TEMPLATE))
84 return file, file.get_sheet_by_name('Прайслист')
85
86 def fill_header(self):
87 """Fill header of a sheet with date and title."""
88 date_cell = 'C5'
89 self.sheet.title = self.SHEET_TITLE
90 self.sheet[date_cell] = datetime.date.strftime(
91 datetime.date.today(), '%d.%m.%Y')
92
93 def hide_formulas(self):
94 """Hide formulas for calculating totals."""
95 self.sheet.column_dimensions.group('H', 'K', hidden=True, outline_level=0)
96
97 def write_catalog(self):
98 """Write categories and products to sheet."""
99 categories = Category.objects.all().order_by('name').filter(children=None)
100 for category in categories.iterator():
101 self.write_category_with_products(category)
102
103 def write_category_with_products(self, category):
104 """Write category line and beside that - all of products in this category."""
105 def hide_row(row):
106 row.hidden = True
107 row.outlineLevel = 1
108
109 def collapse_row(row):
110 row.collapsed = True
111
112 def write_product_rows():
113 """Write products lines."""
114 sheet = self.sheet
115 products = Product.objects.filter(category=category, page__is_active=True)
116 for product in products.iterator():
117 product_start = 'A' + self.CURRENT_ROW
118 sheet[product_start] = product.name
119 sheet[product_start].font = Font(color=colors.BLUE)
120 sheet[product_start].hyperlink = settings.BASE_URL + product.url
121 sheet[product_start].border = self.THIN_BORDER
122 prices = [
123 product.price,
124 product.wholesale_small,
125 product.wholesale_medium,
126 product.wholesale_large,
127 ]
128 for price, total in zip('CDEF', 'HIJK'):
129 sheet[price + self.CURRENT_ROW] = prices.pop(0)
130 sheet[total + self.CURRENT_ROW] = (
131 '={0}{1}*G{1}'.format(price, self.CURRENT_ROW)
132 )
133
134 sheet[price + self.CURRENT_ROW].border = self.THIN_BORDER
135
136 sheet['G' + self.CURRENT_ROW].fill = self.BUY_FILL
137 sheet['G' + self.CURRENT_ROW].border = self.THIN_BORDER
138
139 hide_row(self.get_row(self.CURRENT_ROW))
140 self.increase_row()
141
142 def write_category_row():
143 """Merge category line into one cell and write to it."""
144 sheet = self.sheet
145 collapse_row(self.get_row(self.CURRENT_ROW))
146
147 category_start = 'A' + self.CURRENT_ROW
148 category_line = '{}:{}'.format(
149 category_start, 'G' + self.CURRENT_ROW)
150 sheet.merge_cells(category_line)
151 sheet[category_start] = category.name
152 sheet[category_start].fill = self.CATEGORY_FILL
153
154 self.increase_row()
155
156 write_category_row()
157 write_product_rows()
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/management/commands/excel.py b/shopelectro/management/commands/excel.py
--- a/shopelectro/management/commands/excel.py
+++ b/shopelectro/management/commands/excel.py
@@ -1,7 +1,7 @@
"""
Generate Excel price-list.
-Use this excel editor lib: https://openpyxl.readthedocs.io/en/default/
+Use this excel editor lib: https://openpyxl.readthedocs.io/en/stable/
"""
import datetime
import os
|
{"golden_diff": "diff --git a/shopelectro/management/commands/excel.py b/shopelectro/management/commands/excel.py\n--- a/shopelectro/management/commands/excel.py\n+++ b/shopelectro/management/commands/excel.py\n@@ -1,7 +1,7 @@\n \"\"\"\n Generate Excel price-list.\n \n-Use this excel editor lib: https://openpyxl.readthedocs.io/en/default/\n+Use this excel editor lib: https://openpyxl.readthedocs.io/en/stable/\n \"\"\"\n import datetime\n import os\n", "issue": "Fix env substitution in docker-compose.yml for docker-rm\nhttps://ci.fidals.com/fidals/shopelectro/938/12\n", "before_files": [{"content": "\"\"\"\nGenerate Excel price-list.\n\nUse this excel editor lib: https://openpyxl.readthedocs.io/en/default/\n\"\"\"\nimport datetime\nimport os\nfrom collections import namedtuple\n\nimport openpyxl\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom openpyxl.styles import borders, colors, Font\n\nfrom shopelectro.models import Product, Category\n\n\nclass Command(BaseCommand):\n TEMPLATE = 'templates/ecommerce/template.xlsx'\n NAME = 'pricelist.xlsx'\n SHEET_TITLE = '\u041f\u0440\u0430\u0439\u0441 Shopelectro'\n CATEGORY_FILL = openpyxl.styles.PatternFill(\n start_color='F4FEFD',\n end_color='F4FEFD',\n fill_type='solid'\n )\n BUY_FILL = openpyxl.styles.PatternFill(\n start_color='FEFEF0',\n end_color='FEFEF0',\n fill_type='solid'\n )\n THIN_BORDER = borders.Border(\n top=borders.Side(style='thin'),\n right=borders.Side(style='thin'),\n bottom=borders.Side(style='thin'),\n left=borders.Side(style='thin')\n )\n CURRENT_ROW = '9' # Start of catalog section in file.\n cell = namedtuple('cell', ['row', 'col'])\n BAD_STYLED_CELLS = ['D5', 'E5', 'D6', 'G8']\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.file, self.sheet = self.load_file_and_sheet()\n\n def handle(self, *args, **options):\n \"\"\"Open template's file and start proceeding it.\"\"\"\n self.set_collapse_controls()\n self.fill_header()\n self.write_catalog()\n self.hide_formulas()\n self.set_styles()\n base_dir = settings.ASSETS_DIR\n self.file.save(os.path.join(base_dir, self.NAME))\n\n def set_styles(self):\n for cell in self.BAD_STYLED_CELLS:\n self.sheet[cell].border = self.THIN_BORDER\n\n def set_collapse_controls(self):\n \"\"\"\n Place collapse buttons above rows.\n\n Collapse controls looks like this: http://prntscr.com/clf9xh. 
# Ignore InvalidLinkBear\n Doc link: https://goo.gl/nR5pLO\n \"\"\"\n self.sheet.sheet_properties.outlinePr.summaryBelow = False\n\n def increase_row(self):\n self.CURRENT_ROW = str(int(self.CURRENT_ROW) + 1)\n return self.CURRENT_ROW\n\n def get_row(self, row_number):\n return self.sheet.row_dimensions[int(row_number)]\n\n def load_file_and_sheet(self):\n \"\"\"\n Load template file into openpyxl.\n\n Return tuple with opened openpyxl file's object and active price sheet.\n \"\"\"\n file = openpyxl.load_workbook(os.path.join(\n settings.BASE_DIR, self.TEMPLATE))\n return file, file.get_sheet_by_name('\u041f\u0440\u0430\u0439\u0441\u043b\u0438\u0441\u0442')\n\n def fill_header(self):\n \"\"\"Fill header of a sheet with date and title.\"\"\"\n date_cell = 'C5'\n self.sheet.title = self.SHEET_TITLE\n self.sheet[date_cell] = datetime.date.strftime(\n datetime.date.today(), '%d.%m.%Y')\n\n def hide_formulas(self):\n \"\"\"Hide formulas for calculating totals.\"\"\"\n self.sheet.column_dimensions.group('H', 'K', hidden=True, outline_level=0)\n\n def write_catalog(self):\n \"\"\"Write categories and products to sheet.\"\"\"\n categories = Category.objects.all().order_by('name').filter(children=None)\n for category in categories.iterator():\n self.write_category_with_products(category)\n\n def write_category_with_products(self, category):\n \"\"\"Write category line and beside that - all of products in this category.\"\"\"\n def hide_row(row):\n row.hidden = True\n row.outlineLevel = 1\n\n def collapse_row(row):\n row.collapsed = True\n\n def write_product_rows():\n \"\"\"Write products lines.\"\"\"\n sheet = self.sheet\n products = Product.objects.filter(category=category, page__is_active=True)\n for product in products.iterator():\n product_start = 'A' + self.CURRENT_ROW\n sheet[product_start] = product.name\n sheet[product_start].font = Font(color=colors.BLUE)\n sheet[product_start].hyperlink = settings.BASE_URL + product.url\n sheet[product_start].border = self.THIN_BORDER\n prices = [\n product.price,\n product.wholesale_small,\n product.wholesale_medium,\n product.wholesale_large,\n ]\n for price, total in zip('CDEF', 'HIJK'):\n sheet[price + self.CURRENT_ROW] = prices.pop(0)\n sheet[total + self.CURRENT_ROW] = (\n '={0}{1}*G{1}'.format(price, self.CURRENT_ROW)\n )\n\n sheet[price + self.CURRENT_ROW].border = self.THIN_BORDER\n\n sheet['G' + self.CURRENT_ROW].fill = self.BUY_FILL\n sheet['G' + self.CURRENT_ROW].border = self.THIN_BORDER\n\n hide_row(self.get_row(self.CURRENT_ROW))\n self.increase_row()\n\n def write_category_row():\n \"\"\"Merge category line into one cell and write to it.\"\"\"\n sheet = self.sheet\n collapse_row(self.get_row(self.CURRENT_ROW))\n\n category_start = 'A' + self.CURRENT_ROW\n category_line = '{}:{}'.format(\n category_start, 'G' + self.CURRENT_ROW)\n sheet.merge_cells(category_line)\n sheet[category_start] = category.name\n sheet[category_start].fill = self.CATEGORY_FILL\n\n self.increase_row()\n\n write_category_row()\n write_product_rows()\n", "path": "shopelectro/management/commands/excel.py"}], "after_files": [{"content": "\"\"\"\nGenerate Excel price-list.\n\nUse this excel editor lib: https://openpyxl.readthedocs.io/en/stable/\n\"\"\"\nimport datetime\nimport os\nfrom collections import namedtuple\n\nimport openpyxl\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom openpyxl.styles import borders, colors, Font\n\nfrom shopelectro.models import Product, Category\n\n\nclass Command(BaseCommand):\n TEMPLATE = 
'templates/ecommerce/template.xlsx'\n NAME = 'pricelist.xlsx'\n SHEET_TITLE = '\u041f\u0440\u0430\u0439\u0441 Shopelectro'\n CATEGORY_FILL = openpyxl.styles.PatternFill(\n start_color='F4FEFD',\n end_color='F4FEFD',\n fill_type='solid'\n )\n BUY_FILL = openpyxl.styles.PatternFill(\n start_color='FEFEF0',\n end_color='FEFEF0',\n fill_type='solid'\n )\n THIN_BORDER = borders.Border(\n top=borders.Side(style='thin'),\n right=borders.Side(style='thin'),\n bottom=borders.Side(style='thin'),\n left=borders.Side(style='thin')\n )\n CURRENT_ROW = '9' # Start of catalog section in file.\n cell = namedtuple('cell', ['row', 'col'])\n BAD_STYLED_CELLS = ['D5', 'E5', 'D6', 'G8']\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.file, self.sheet = self.load_file_and_sheet()\n\n def handle(self, *args, **options):\n \"\"\"Open template's file and start proceeding it.\"\"\"\n self.set_collapse_controls()\n self.fill_header()\n self.write_catalog()\n self.hide_formulas()\n self.set_styles()\n base_dir = settings.ASSETS_DIR\n self.file.save(os.path.join(base_dir, self.NAME))\n\n def set_styles(self):\n for cell in self.BAD_STYLED_CELLS:\n self.sheet[cell].border = self.THIN_BORDER\n\n def set_collapse_controls(self):\n \"\"\"\n Place collapse buttons above rows.\n\n Collapse controls looks like this: http://prntscr.com/clf9xh. # Ignore InvalidLinkBear\n Doc link: https://goo.gl/nR5pLO\n \"\"\"\n self.sheet.sheet_properties.outlinePr.summaryBelow = False\n\n def increase_row(self):\n self.CURRENT_ROW = str(int(self.CURRENT_ROW) + 1)\n return self.CURRENT_ROW\n\n def get_row(self, row_number):\n return self.sheet.row_dimensions[int(row_number)]\n\n def load_file_and_sheet(self):\n \"\"\"\n Load template file into openpyxl.\n\n Return tuple with opened openpyxl file's object and active price sheet.\n \"\"\"\n file = openpyxl.load_workbook(os.path.join(\n settings.BASE_DIR, self.TEMPLATE))\n return file, file.get_sheet_by_name('\u041f\u0440\u0430\u0439\u0441\u043b\u0438\u0441\u0442')\n\n def fill_header(self):\n \"\"\"Fill header of a sheet with date and title.\"\"\"\n date_cell = 'C5'\n self.sheet.title = self.SHEET_TITLE\n self.sheet[date_cell] = datetime.date.strftime(\n datetime.date.today(), '%d.%m.%Y')\n\n def hide_formulas(self):\n \"\"\"Hide formulas for calculating totals.\"\"\"\n self.sheet.column_dimensions.group('H', 'K', hidden=True, outline_level=0)\n\n def write_catalog(self):\n \"\"\"Write categories and products to sheet.\"\"\"\n categories = Category.objects.all().order_by('name').filter(children=None)\n for category in categories.iterator():\n self.write_category_with_products(category)\n\n def write_category_with_products(self, category):\n \"\"\"Write category line and beside that - all of products in this category.\"\"\"\n def hide_row(row):\n row.hidden = True\n row.outlineLevel = 1\n\n def collapse_row(row):\n row.collapsed = True\n\n def write_product_rows():\n \"\"\"Write products lines.\"\"\"\n sheet = self.sheet\n products = Product.objects.filter(category=category, page__is_active=True)\n for product in products.iterator():\n product_start = 'A' + self.CURRENT_ROW\n sheet[product_start] = product.name\n sheet[product_start].font = Font(color=colors.BLUE)\n sheet[product_start].hyperlink = settings.BASE_URL + product.url\n sheet[product_start].border = self.THIN_BORDER\n prices = [\n product.price,\n product.wholesale_small,\n product.wholesale_medium,\n product.wholesale_large,\n ]\n for price, total in zip('CDEF', 'HIJK'):\n 
sheet[price + self.CURRENT_ROW] = prices.pop(0)\n sheet[total + self.CURRENT_ROW] = (\n '={0}{1}*G{1}'.format(price, self.CURRENT_ROW)\n )\n\n sheet[price + self.CURRENT_ROW].border = self.THIN_BORDER\n\n sheet['G' + self.CURRENT_ROW].fill = self.BUY_FILL\n sheet['G' + self.CURRENT_ROW].border = self.THIN_BORDER\n\n hide_row(self.get_row(self.CURRENT_ROW))\n self.increase_row()\n\n def write_category_row():\n \"\"\"Merge category line into one cell and write to it.\"\"\"\n sheet = self.sheet\n collapse_row(self.get_row(self.CURRENT_ROW))\n\n category_start = 'A' + self.CURRENT_ROW\n category_line = '{}:{}'.format(\n category_start, 'G' + self.CURRENT_ROW)\n sheet.merge_cells(category_line)\n sheet[category_start] = category.name\n sheet[category_start].fill = self.CATEGORY_FILL\n\n self.increase_row()\n\n write_category_row()\n write_product_rows()\n", "path": "shopelectro/management/commands/excel.py"}]}
| 1,907 | 119 |
gh_patches_debug_23961
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-1858
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feat] convert query to string using new mindsdb_sql feature
Since [that work](https://github.com/mindsdb/mindsdb_sql/issues/130) is finished, we can add these changes to mindsdb. So we can delete the dirty fix for escape symbols, and that should prevent other hidden errors.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/api/mysql/mysql_proxy/utilities/sql.py`
Content:
```
1 import duckdb
2 import pandas as pd
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy
5
6
7 def _remove_table_name(root):
8 if isinstance(root, BinaryOperation):
9 _remove_table_name(root.args[0])
10 _remove_table_name(root.args[1])
11 elif isinstance(root, Identifier):
12 root.parts = [root.parts[-1]]
13
14
15 def query_df(df, query):
16 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.
17
18 Args:
19 df (pandas.DataFrame): data
20 query (mindsdb_sql.parser.ast.Select | str): select query
21
22 Returns:
23 pandas.DataFrame
24 """
25
26 query = parse_sql(str(query), dialect='mysql')
27 if isinstance(query, Select) is False or isinstance(query.from_table, Identifier) is False:
28 raise Exception("Only 'SELECT from TABLE' statements supported for internal query")
29
30 query.from_table.parts = ['df_table']
31 for identifier in query.targets:
32 if isinstance(identifier, Identifier):
33 identifier.parts = [identifier.parts[-1]]
34 if isinstance(query.order_by, list):
35 for orderby in query.order_by:
36 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):
37 orderby.field.parts = [orderby.field.parts[-1]]
38 _remove_table_name(query.where)
39
40 # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130
41 # we need way to dump suery in postgres dialect
42 sql_query = str(query).replace('`', '')
43 res = duckdb.query_df(df, 'df_table', sql_query)
44 result_df = res.df()
45 result_df = result_df.where(pd.notnull(result_df), None)
46 return result_df
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
@@ -2,6 +2,9 @@
import pandas as pd
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+
+from mindsdb.utilities.log import log
def _remove_table_name(root):
@@ -37,10 +40,14 @@
orderby.field.parts = [orderby.field.parts[-1]]
_remove_table_name(query.where)
- # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130
- # we need way to dump suery in postgres dialect
- sql_query = str(query).replace('`', '')
- res = duckdb.query_df(df, 'df_table', sql_query)
+ render = SqlalchemyRender('postgres')
+ try:
+ query_str = render.get_string(query, with_failback=False)
+ except Exception as e:
+ log.error(f"Exception during query casting to 'postgres' dialect. Query: {query}. Error: {e}")
+ query_str = render.get_string(query, with_failback=True)
+
+ res = duckdb.query_df(df, 'df_table', query_str)
result_df = res.df()
result_df = result_df.where(pd.notnull(result_df), None)
return result_df
|
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -2,6 +2,9 @@\n import pandas as pd\n from mindsdb_sql import parse_sql\n from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\n+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n+\n+from mindsdb.utilities.log import log\n \n \n def _remove_table_name(root):\n@@ -37,10 +40,14 @@\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query.where)\n \n- # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130\n- # we need way to dump suery in postgres dialect\n- sql_query = str(query).replace('`', '')\n- res = duckdb.query_df(df, 'df_table', sql_query)\n+ render = SqlalchemyRender('postgres')\n+ try:\n+ query_str = render.get_string(query, with_failback=False)\n+ except Exception as e:\n+ log.error(f\"Exception during query casting to 'postgres' dialect. Query: {query}. Error: {e}\")\n+ query_str = render.get_string(query, with_failback=True)\n+\n+ res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "issue": "[Feat] convert query to string using new mindsdb_sql feature\nSince [that works](https://github.com/mindsdb/mindsdb_sql/issues/130) is finished, we can add this changes to mindsdb. So ve can delete dirtyfix for escape symbols and that should prevent from other hidden errors.\r\n\n", "before_files": [{"content": "import duckdb\nimport pandas as pd\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n query = parse_sql(str(query), dialect='mysql')\n if isinstance(query, Select) is False or isinstance(query.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query.from_table.parts = ['df_table']\n for identifier in query.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query.order_by, list):\n for orderby in query.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query.where)\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130\n # we need way to dump suery in postgres dialect\n sql_query = str(query).replace('`', '')\n res = duckdb.query_df(df, 'df_table', sql_query)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}], "after_files": [{"content": "import duckdb\nimport pandas as pd\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import 
log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n query = parse_sql(str(query), dialect='mysql')\n if isinstance(query, Select) is False or isinstance(query.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query.from_table.parts = ['df_table']\n for identifier in query.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query.order_by, list):\n for orderby in query.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {query}. Error: {e}\")\n query_str = render.get_string(query, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]}
| 802 | 350 |
gh_patches_debug_30403
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-2645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in legacy padding layer
I've found a bug in [Padding.py](https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/Padding.py).
It doesn't handle nInputDim original Torch Padding layer's parameter . As far as I've unterstood, you've rewritten lua code from [here](https://github.com/torch/nn/blob/master/Padding.lua), but not fully. 19th line in this file contains code for dealing with nInputDim parameter.
To reproduce issue, you need to download ENet model from [here](https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa)(original model from ENet authors) and make inference:
`import numpy as np`
`import torch`
`from torch.utils.serialization import load_lua`
`image = np.ones((1,3,1024,2048))`
`tensor = torch.FloatTensor(image)`
`net_torch = load_lua(torch_model)`
`out_torch = net_torch.forward(tensor)`
Now I've got en exception:
Traceback (most recent call last):
```
File "/hdd/PycharmProjects/untitled/test.py", line 110, in <module>
out_torch = net_torch.forward(tensor).numpy()
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Module.py", line 33, in forward
return self.updateOutput(input)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py", line 36, in updateOutput
currentOutput = module.updateOutput(currentOutput)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py", line 36, in updateOutput
currentOutput = module.updateOutput(currentOutput)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/CAddTable.py", line 20, in updateOutput
self.output.add_(input[i])
RuntimeError: inconsistent tensor size at /b/wheel/pytorch-src/torch/lib/TH/generic/THTensorMath.c:827
```
Padding is added to wrong axis, what's why CAddTable can't sum blobs with different shapes and throws exception.
If I edit the code of Padding.py by adding one to self.dim variables (lines 21 and 23 in Padding.py), all works correctly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/legacy/nn/Padding.py`
Content:
```
1 import torch
2 from .Module import Module
3
4
5 class Padding(Module):
6 # pad puts in [pad] amount of [value] over dimension [dim], starting at
7 # index [index] in that dimension. If pad<0, index counts from the left.
8 # If pad>0 index counts from the right index = 1 pads before index 1.
9 # index = 2 pads starting before index 2 and after index 1 in dimension [dim]
10
11 def __init__(self, dim, pad, value=0, index=0):
12 self.value = value
13 self.index = index
14 self.dim = dim
15 self.pad = pad
16 self.outputSize = torch.Size()
17 super(Padding, self).__init__()
18
19 def updateOutput(self, input):
20 outputSize = list(input.size())
21 outputSize[self.dim] += abs(self.pad)
22 self.outputSize = torch.Size(outputSize)
23 dim = self.dim
24
25 self.output.resize_(self.outputSize)
26 self.output.fill_(self.value)
27 index = self.index
28 pad = self.pad
29 if pad > 0:
30 index = input.size(dim) - index
31 else:
32 pad = -pad
33
34 if index == 0:
35 self.output.narrow(dim, pad, input.size(dim)).copy_(input)
36 elif index == input.size(dim):
37 self.output.narrow(dim, 0, input.size(dim)).copy_(input)
38 else:
39 self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))
40 self.output.narrow(dim, index + pad, input.size(dim) -
41 index).copy_(input.narrow(dim, index, input.size(dim) - index))
42
43 return self.output
44
45 def updateGradInput(self, input, gradOutput):
46 self.gradInput.resize_as_(input)
47 dim = self.dim
48
49 index = self.index
50 pad = self.pad
51 if pad > 0:
52 index = input.size(dim) - index
53 else:
54 pad = -pad
55
56 if index == 0:
57 self.gradInput.copy_(gradOutput.narrow(dim, pad, input.size(dim)))
58 elif index == input.size(dim):
59 self.gradInput.copy_(gradOutput.narrow(dim, 0, input.size(dim)))
60 else:
61 self.gradInput.narrow(dim, 0, index).copy_(gradOutput.narrow(dim, 0, index))
62 self.gradInput.narrow(dim, index, input.size(
63 dim) - index).copy_(gradOutput.narrow(dim, index + pad, input.size(dim) - index))
64
65 return self.gradInput
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/legacy/nn/Padding.py b/torch/legacy/nn/Padding.py
--- a/torch/legacy/nn/Padding.py
+++ b/torch/legacy/nn/Padding.py
@@ -7,12 +7,15 @@
# index [index] in that dimension. If pad<0, index counts from the left.
# If pad>0 index counts from the right index = 1 pads before index 1.
# index = 2 pads starting before index 2 and after index 1 in dimension [dim]
+ # When nInputDim is provided, inputs larger than that value will be considered batches
+ # where the actual dim to be padded will be dimension dim + 1.
- def __init__(self, dim, pad, value=0, index=0):
+ def __init__(self, dim, pad, value=0, index=0, nInputDim=0):
self.value = value
self.index = index
self.dim = dim
self.pad = pad
+ self.nInputDim = nInputDim
self.outputSize = torch.Size()
super(Padding, self).__init__()
@@ -22,6 +25,9 @@
self.outputSize = torch.Size(outputSize)
dim = self.dim
+ if hasattr(self, "nInputDim") and self.nInputDim > 0 and input.dim() != self.nInputDim:
+ dim = dim + 1
+
self.output.resize_(self.outputSize)
self.output.fill_(self.value)
index = self.index
@@ -46,6 +52,9 @@
self.gradInput.resize_as_(input)
dim = self.dim
+ if hasattr(self, "nInputDim") and self.nInputDim > 0 and input.dim() != self.nInputDim:
+ dim = dim + 1
+
index = self.index
pad = self.pad
if pad > 0:
|
{"golden_diff": "diff --git a/torch/legacy/nn/Padding.py b/torch/legacy/nn/Padding.py\n--- a/torch/legacy/nn/Padding.py\n+++ b/torch/legacy/nn/Padding.py\n@@ -7,12 +7,15 @@\n # index [index] in that dimension. If pad<0, index counts from the left.\n # If pad>0 index counts from the right index = 1 pads before index 1.\n # index = 2 pads starting before index 2 and after index 1 in dimension [dim]\n+ # When nInputDim is provided, inputs larger than that value will be considered batches\n+ # where the actual dim to be padded will be dimension dim + 1.\n \n- def __init__(self, dim, pad, value=0, index=0):\n+ def __init__(self, dim, pad, value=0, index=0, nInputDim=0):\n self.value = value\n self.index = index\n self.dim = dim\n self.pad = pad\n+ self.nInputDim = nInputDim\n self.outputSize = torch.Size()\n super(Padding, self).__init__()\n \n@@ -22,6 +25,9 @@\n self.outputSize = torch.Size(outputSize)\n dim = self.dim\n \n+ if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n+ dim = dim + 1\n+\n self.output.resize_(self.outputSize)\n self.output.fill_(self.value)\n index = self.index\n@@ -46,6 +52,9 @@\n self.gradInput.resize_as_(input)\n dim = self.dim\n \n+ if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n+ dim = dim + 1\n+\n index = self.index\n pad = self.pad\n if pad > 0:\n", "issue": "Bug in legacy padding layer\nI've found a bug in [Padding.py](https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/Padding.py).\r\nIt doesn't handle nInputDim original Torch Padding layer's parameter . As far as I've unterstood, you've rewritten lua code from [here](https://github.com/torch/nn/blob/master/Padding.lua), but not fully. 19th line in this file contains code for dealing with nInputDim parameter.\r\nTo reproduce issue, you need to download ENet model from [here](https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa)(original model from ENet authors) and make inference:\r\n`import numpy as np`\r\n`import torch`\r\n`from torch.utils.serialization import load_lua`\r\n`image = np.ones((1,3,1024,2048))`\r\n`tensor = torch.FloatTensor(image)`\r\n`net_torch = load_lua(torch_model)`\r\n`out_torch = net_torch.forward(tensor)`\r\n\r\nNow I've got en exception: \r\nTraceback (most recent call last):\r\n```\r\n File \"/hdd/PycharmProjects/untitled/test.py\", line 110, in <module>\r\n out_torch = net_torch.forward(tensor).numpy()\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Module.py\", line 33, in forward\r\n return self.updateOutput(input)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py\", line 36, in updateOutput\r\n currentOutput = module.updateOutput(currentOutput)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py\", line 36, in updateOutput\r\n currentOutput = module.updateOutput(currentOutput)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/CAddTable.py\", line 20, in updateOutput\r\n self.output.add_(input[i])\r\nRuntimeError: inconsistent tensor size at /b/wheel/pytorch-src/torch/lib/TH/generic/THTensorMath.c:827\r\n```\r\n\r\nPadding is added to wrong axis, what's why CAddTable can't sum blobs with different shapes and throws exception.\r\nIf I edit the code of Padding.py by adding one to self.dim variables (lines 21 and 23 in Padding.py), all works correctly.\n", "before_files": [{"content": "import torch\nfrom .Module import Module\n\n\nclass Padding(Module):\n # pad puts in [pad] amount of 
[value] over dimension [dim], starting at\n # index [index] in that dimension. If pad<0, index counts from the left.\n # If pad>0 index counts from the right index = 1 pads before index 1.\n # index = 2 pads starting before index 2 and after index 1 in dimension [dim]\n\n def __init__(self, dim, pad, value=0, index=0):\n self.value = value\n self.index = index\n self.dim = dim\n self.pad = pad\n self.outputSize = torch.Size()\n super(Padding, self).__init__()\n\n def updateOutput(self, input):\n outputSize = list(input.size())\n outputSize[self.dim] += abs(self.pad)\n self.outputSize = torch.Size(outputSize)\n dim = self.dim\n\n self.output.resize_(self.outputSize)\n self.output.fill_(self.value)\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.output.narrow(dim, pad, input.size(dim)).copy_(input)\n elif index == input.size(dim):\n self.output.narrow(dim, 0, input.size(dim)).copy_(input)\n else:\n self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))\n self.output.narrow(dim, index + pad, input.size(dim) -\n index).copy_(input.narrow(dim, index, input.size(dim) - index))\n\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n self.gradInput.resize_as_(input)\n dim = self.dim\n\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.gradInput.copy_(gradOutput.narrow(dim, pad, input.size(dim)))\n elif index == input.size(dim):\n self.gradInput.copy_(gradOutput.narrow(dim, 0, input.size(dim)))\n else:\n self.gradInput.narrow(dim, 0, index).copy_(gradOutput.narrow(dim, 0, index))\n self.gradInput.narrow(dim, index, input.size(\n dim) - index).copy_(gradOutput.narrow(dim, index + pad, input.size(dim) - index))\n\n return self.gradInput\n", "path": "torch/legacy/nn/Padding.py"}], "after_files": [{"content": "import torch\nfrom .Module import Module\n\n\nclass Padding(Module):\n # pad puts in [pad] amount of [value] over dimension [dim], starting at\n # index [index] in that dimension. 
If pad<0, index counts from the left.\n # If pad>0 index counts from the right index = 1 pads before index 1.\n # index = 2 pads starting before index 2 and after index 1 in dimension [dim]\n # When nInputDim is provided, inputs larger than that value will be considered batches\n # where the actual dim to be padded will be dimension dim + 1.\n\n def __init__(self, dim, pad, value=0, index=0, nInputDim=0):\n self.value = value\n self.index = index\n self.dim = dim\n self.pad = pad\n self.nInputDim = nInputDim\n self.outputSize = torch.Size()\n super(Padding, self).__init__()\n\n def updateOutput(self, input):\n outputSize = list(input.size())\n outputSize[self.dim] += abs(self.pad)\n self.outputSize = torch.Size(outputSize)\n dim = self.dim\n\n if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n dim = dim + 1\n\n self.output.resize_(self.outputSize)\n self.output.fill_(self.value)\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.output.narrow(dim, pad, input.size(dim)).copy_(input)\n elif index == input.size(dim):\n self.output.narrow(dim, 0, input.size(dim)).copy_(input)\n else:\n self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))\n self.output.narrow(dim, index + pad, input.size(dim) -\n index).copy_(input.narrow(dim, index, input.size(dim) - index))\n\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n self.gradInput.resize_as_(input)\n dim = self.dim\n\n if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n dim = dim + 1\n\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.gradInput.copy_(gradOutput.narrow(dim, pad, input.size(dim)))\n elif index == input.size(dim):\n self.gradInput.copy_(gradOutput.narrow(dim, 0, input.size(dim)))\n else:\n self.gradInput.narrow(dim, 0, index).copy_(gradOutput.narrow(dim, 0, index))\n self.gradInput.narrow(dim, index, input.size(\n dim) - index).copy_(gradOutput.narrow(dim, index + pad, input.size(dim) - index))\n\n return self.gradInput\n", "path": "torch/legacy/nn/Padding.py"}]}
| 1,502 | 437 |
gh_patches_debug_22994
|
rasdani/github-patches
|
git_diff
|
numba__numba-2610
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lowering error when removing element from the middle of linked list
Hi
I am having a hard time trying to figure out an error when I reassign "element_0.next = element_1.next" of a linked list. I posted my error at stackoverflow: https://stackoverflow.com/questions/47232035/numba-lowering-error-when-reassigning-next-link-of-a-linked-list-element
I am creating this issue to make it closer to you.
I have visited most of your examples and documentation pages, no ideas whatsoever.
What am I missing?
I am using numba **0.35.0** (from Anaconda, python 3.6.2).
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/targets/optional.py`
Content:
```
1 from __future__ import print_function, absolute_import, division
2
3 from numba import types, cgutils
4
5 from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,
6 impl_ret_untracked)
7
8
9 def always_return_true_impl(context, builder, sig, args):
10 return cgutils.true_bit
11
12
13 def always_return_false_impl(context, builder, sig, args):
14 return cgutils.false_bit
15
16
17 def optional_is_none(context, builder, sig, args):
18 """
19 Check if an Optional value is invalid
20 """
21 [lty, rty] = sig.args
22 [lval, rval] = args
23
24 # Make sure None is on the right
25 if lty == types.none:
26 lty, rty = rty, lty
27 lval, rval = rval, lval
28
29 opt_type = lty
30 opt_val = lval
31
32 opt = context.make_helper(builder, opt_type, opt_val)
33 res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
34 return impl_ret_untracked(context, builder, sig.return_type, res)
35
36
37 # None is/not None
38 lower_builtin('is', types.none, types.none)(always_return_true_impl)
39
40 # Optional is None
41 lower_builtin('is', types.Optional, types.none)(optional_is_none)
42 lower_builtin('is', types.none, types.Optional)(optional_is_none)
43
44
45 @lower_getattr_generic(types.Optional)
46 def optional_getattr(context, builder, typ, value, attr):
47 """
48 Optional.__getattr__ => redirect to the wrapped type.
49 """
50 inner_type = typ.type
51 val = context.cast(builder, value, typ, inner_type)
52 imp = context.get_getattr(inner_type, attr)
53 return imp(context, builder, inner_type, val, attr)
54
55
56 @lower_cast(types.Optional, types.Optional)
57 def optional_to_optional(context, builder, fromty, toty, val):
58 """
59 The handling of optional->optional cast must be special cased for
60 correct propagation of None value. Given type T and U. casting of
61 T? to U? (? denotes optional) should always succeed. If the from-value
62 is None, the None value the casted value (U?) should be None; otherwise,
63 the from-value is casted to U. This is different from casting T? to U,
64 which requires the from-value must not be None.
65 """
66 optval = context.make_helper(builder, fromty, value=val)
67 validbit = cgutils.as_bool_bit(builder, optval.valid)
68 # Create uninitialized optional value
69 outoptval = context.make_helper(builder, toty)
70
71 with builder.if_else(validbit) as (is_valid, is_not_valid):
72 with is_valid:
73 # Cast internal value
74 outoptval.valid = cgutils.true_bit
75 outoptval.data = context.cast(builder, optval.data,
76 fromty.type, toty.type)
77
78 with is_not_valid:
79 # Store None to result
80 outoptval.valid = cgutils.false_bit
81 outoptval.data = cgutils.get_null_value(
82 outoptval.data.type)
83
84 return outoptval._getvalue()
85
86
87 @lower_cast(types.Any, types.Optional)
88 def any_to_optional(context, builder, fromty, toty, val):
89 if fromty == types.none:
90 return context.make_optional_none(builder, toty.type)
91 else:
92 val = context.cast(builder, val, fromty, toty.type)
93 return context.make_optional_value(builder, toty.type, val)
94
95
96 @lower_cast(types.Optional, types.Any)
97 @lower_cast(types.Optional, types.Boolean)
98 def optional_to_any(context, builder, fromty, toty, val):
99 optval = context.make_helper(builder, fromty, value=val)
100 validbit = cgutils.as_bool_bit(builder, optval.valid)
101 with builder.if_then(builder.not_(validbit), likely=False):
102 msg = "expected %s, got None" % (fromty.type,)
103 context.call_conv.return_user_exc(builder, TypeError, (msg,))
104
105 return context.cast(builder, optval.data, fromty.type, toty)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numba/targets/optional.py b/numba/targets/optional.py
--- a/numba/targets/optional.py
+++ b/numba/targets/optional.py
@@ -1,9 +1,9 @@
from __future__ import print_function, absolute_import, division
-from numba import types, cgutils
+from numba import types, cgutils, typing
from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,
- impl_ret_untracked)
+ impl_ret_untracked, lower_setattr_generic)
def always_return_true_impl(context, builder, sig, args):
@@ -53,6 +53,21 @@
return imp(context, builder, inner_type, val, attr)
+@lower_setattr_generic(types.Optional)
+def optional_setattr(context, builder, sig, args, attr):
+ """
+ Optional.__setattr__ => redirect to the wrapped type.
+ """
+ basety, valty = sig.args
+ target, val = args
+ target_type = basety.type
+ target = context.cast(builder, target, basety, target_type)
+
+ newsig = typing.signature(sig.return_type, target_type, valty)
+ imp = context.get_setattr(attr, newsig)
+ return imp(builder, (target, val))
+
+
@lower_cast(types.Optional, types.Optional)
def optional_to_optional(context, builder, fromty, toty, val):
"""
|
{"golden_diff": "diff --git a/numba/targets/optional.py b/numba/targets/optional.py\n--- a/numba/targets/optional.py\n+++ b/numba/targets/optional.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, absolute_import, division\n \n-from numba import types, cgutils\n+from numba import types, cgutils, typing\n \n from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,\n- impl_ret_untracked)\n+ impl_ret_untracked, lower_setattr_generic)\n \n \n def always_return_true_impl(context, builder, sig, args):\n@@ -53,6 +53,21 @@\n return imp(context, builder, inner_type, val, attr)\n \n \n+@lower_setattr_generic(types.Optional)\n+def optional_setattr(context, builder, sig, args, attr):\n+ \"\"\"\n+ Optional.__setattr__ => redirect to the wrapped type.\n+ \"\"\"\n+ basety, valty = sig.args\n+ target, val = args\n+ target_type = basety.type\n+ target = context.cast(builder, target, basety, target_type)\n+\n+ newsig = typing.signature(sig.return_type, target_type, valty)\n+ imp = context.get_setattr(attr, newsig)\n+ return imp(builder, (target, val))\n+\n+\n @lower_cast(types.Optional, types.Optional)\n def optional_to_optional(context, builder, fromty, toty, val):\n \"\"\"\n", "issue": "Lowering error when removing element from the middle of linked list\nHi\r\n\r\nI am having a hard time trying to figure out an error when I reassign \"element_0.next = element_1.next\" of a linked list. I posted my error at stackoverflow: https://stackoverflow.com/questions/47232035/numba-lowering-error-when-reassigning-next-link-of-a-linked-list-element\r\n\r\nI am creating this issue to make it closer to you.\r\n\r\nI have visited most of your examples and documentation pages, no ideas whatsoever.\r\n\r\nWhat am I missing?\r\n\r\nI am using numba **0.35.0** (from Anaconda, python 3.6.2).\r\n\r\nThanks\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom numba import types, cgutils\n\nfrom .imputils import (lower_cast, lower_builtin, lower_getattr_generic,\n impl_ret_untracked)\n\n\ndef always_return_true_impl(context, builder, sig, args):\n return cgutils.true_bit\n\n\ndef always_return_false_impl(context, builder, sig, args):\n return cgutils.false_bit\n\n\ndef optional_is_none(context, builder, sig, args):\n \"\"\"\n Check if an Optional value is invalid\n \"\"\"\n [lty, rty] = sig.args\n [lval, rval] = args\n\n # Make sure None is on the right\n if lty == types.none:\n lty, rty = rty, lty\n lval, rval = rval, lval\n\n opt_type = lty\n opt_val = lval\n\n opt = context.make_helper(builder, opt_type, opt_val)\n res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n\n# None is/not None\nlower_builtin('is', types.none, types.none)(always_return_true_impl)\n\n# Optional is None\nlower_builtin('is', types.Optional, types.none)(optional_is_none)\nlower_builtin('is', types.none, types.Optional)(optional_is_none)\n\n\n@lower_getattr_generic(types.Optional)\ndef optional_getattr(context, builder, typ, value, attr):\n \"\"\"\n Optional.__getattr__ => redirect to the wrapped type.\n \"\"\"\n inner_type = typ.type\n val = context.cast(builder, value, typ, inner_type)\n imp = context.get_getattr(inner_type, attr)\n return imp(context, builder, inner_type, val, attr)\n\n\n@lower_cast(types.Optional, types.Optional)\ndef optional_to_optional(context, builder, fromty, toty, val):\n \"\"\"\n The handling of optional->optional cast must be special cased for\n correct propagation of 
None value. Given type T and U. casting of\n T? to U? (? denotes optional) should always succeed. If the from-value\n is None, the None value the casted value (U?) should be None; otherwise,\n the from-value is casted to U. This is different from casting T? to U,\n which requires the from-value must not be None.\n \"\"\"\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n # Create uninitialized optional value\n outoptval = context.make_helper(builder, toty)\n\n with builder.if_else(validbit) as (is_valid, is_not_valid):\n with is_valid:\n # Cast internal value\n outoptval.valid = cgutils.true_bit\n outoptval.data = context.cast(builder, optval.data,\n fromty.type, toty.type)\n\n with is_not_valid:\n # Store None to result\n outoptval.valid = cgutils.false_bit\n outoptval.data = cgutils.get_null_value(\n outoptval.data.type)\n\n return outoptval._getvalue()\n\n\n@lower_cast(types.Any, types.Optional)\ndef any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n return context.make_optional_none(builder, toty.type)\n else:\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n\n\n@lower_cast(types.Optional, types.Any)\n@lower_cast(types.Optional, types.Boolean)\ndef optional_to_any(context, builder, fromty, toty, val):\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n with builder.if_then(builder.not_(validbit), likely=False):\n msg = \"expected %s, got None\" % (fromty.type,)\n context.call_conv.return_user_exc(builder, TypeError, (msg,))\n\n return context.cast(builder, optval.data, fromty.type, toty)\n", "path": "numba/targets/optional.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom numba import types, cgutils, typing\n\nfrom .imputils import (lower_cast, lower_builtin, lower_getattr_generic,\n impl_ret_untracked, lower_setattr_generic)\n\n\ndef always_return_true_impl(context, builder, sig, args):\n return cgutils.true_bit\n\n\ndef always_return_false_impl(context, builder, sig, args):\n return cgutils.false_bit\n\n\ndef optional_is_none(context, builder, sig, args):\n \"\"\"\n Check if an Optional value is invalid\n \"\"\"\n [lty, rty] = sig.args\n [lval, rval] = args\n\n # Make sure None is on the right\n if lty == types.none:\n lty, rty = rty, lty\n lval, rval = rval, lval\n\n opt_type = lty\n opt_val = lval\n\n opt = context.make_helper(builder, opt_type, opt_val)\n res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n\n# None is/not None\nlower_builtin('is', types.none, types.none)(always_return_true_impl)\n\n# Optional is None\nlower_builtin('is', types.Optional, types.none)(optional_is_none)\nlower_builtin('is', types.none, types.Optional)(optional_is_none)\n\n\n@lower_getattr_generic(types.Optional)\ndef optional_getattr(context, builder, typ, value, attr):\n \"\"\"\n Optional.__getattr__ => redirect to the wrapped type.\n \"\"\"\n inner_type = typ.type\n val = context.cast(builder, value, typ, inner_type)\n imp = context.get_getattr(inner_type, attr)\n return imp(context, builder, inner_type, val, attr)\n\n\n@lower_setattr_generic(types.Optional)\ndef optional_setattr(context, builder, sig, args, attr):\n \"\"\"\n Optional.__setattr__ => redirect to the wrapped type.\n \"\"\"\n basety, valty = sig.args\n target, val = args\n 
target_type = basety.type\n target = context.cast(builder, target, basety, target_type)\n\n newsig = typing.signature(sig.return_type, target_type, valty)\n imp = context.get_setattr(attr, newsig)\n return imp(builder, (target, val))\n\n\n@lower_cast(types.Optional, types.Optional)\ndef optional_to_optional(context, builder, fromty, toty, val):\n \"\"\"\n The handling of optional->optional cast must be special cased for\n correct propagation of None value. Given type T and U. casting of\n T? to U? (? denotes optional) should always succeed. If the from-value\n is None, the None value the casted value (U?) should be None; otherwise,\n the from-value is casted to U. This is different from casting T? to U,\n which requires the from-value must not be None.\n \"\"\"\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n # Create uninitialized optional value\n outoptval = context.make_helper(builder, toty)\n\n with builder.if_else(validbit) as (is_valid, is_not_valid):\n with is_valid:\n # Cast internal value\n outoptval.valid = cgutils.true_bit\n outoptval.data = context.cast(builder, optval.data,\n fromty.type, toty.type)\n\n with is_not_valid:\n # Store None to result\n outoptval.valid = cgutils.false_bit\n outoptval.data = cgutils.get_null_value(\n outoptval.data.type)\n\n return outoptval._getvalue()\n\n\n@lower_cast(types.Any, types.Optional)\ndef any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n return context.make_optional_none(builder, toty.type)\n else:\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n\n\n@lower_cast(types.Optional, types.Any)\n@lower_cast(types.Optional, types.Boolean)\ndef optional_to_any(context, builder, fromty, toty, val):\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n with builder.if_then(builder.not_(validbit), likely=False):\n msg = \"expected %s, got None\" % (fromty.type,)\n context.call_conv.return_user_exc(builder, TypeError, (msg,))\n\n return context.cast(builder, optval.data, fromty.type, toty)\n", "path": "numba/targets/optional.py"}]}
| 1,524 | 325 |
gh_patches_debug_8179
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-1659
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fatal error when $TERM is not set
I am trying to use `pwntools` in a SageMath script. This works great interactively, but when I run my script with `sage foo.sage`, it fails somewhere in terminal code. I have traced this back to sage unsetting $TERM in non-interactive calls due to https://trac.sagemath.org/ticket/12263. Thus, issue can be easily reproduced without SageMath:
```sh
~$ docker run -it pwntools/pwntools:stable
pwntools@bce19e99e965:~$ TERM= python -c 'import pwn'
Warning: error: setupterm: could not find terminfo database
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pwn/__init__.py", line 4, in <module>
from pwn.toplevel import *
File "/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py", line 20, in <module>
import pwnlib
File "/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py", line 43, in <module>
importlib.import_module('.%s' % module, 'pwnlib')
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/args.py", line 62, in <module>
from pwnlib import term
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py", line 6, in <module>
from pwnlib.term import completer
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py", line 7, in <module>
from pwnlib.term import readline
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py", line 13, in <module>
from pwnlib.term import text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 126, in <module>
sys.modules[__name__] = Module()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 49, in __init__
s = termcap.get(y)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/unix_termcap.py", line 28, in get
s = curses.tigetstr(cap)
_curses.error: must call (at least) setupterm() first
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/term/unix_termcap.py`
Content:
```
1 from __future__ import division
2 from __future__ import print_function
3
4 __all__ = ['get']
5 import curses
6 import os
7 import sys
8
9 cache = None
10
11 def get(cap, *args, **kwargs):
12 default = kwargs.pop('default', '')
13
14 if 'PWNLIB_NOTERM' in os.environ:
15 return ''
16
17 # Hack for readthedocs.org
18 if 'READTHEDOCS' in os.environ:
19 return ''
20
21 if kwargs != {}:
22 raise TypeError("get(): No such argument %r" % kwargs.popitem()[0])
23
24 if cache is None:
25 init()
26 s = cache.get(cap)
27 if not s:
28 s = curses.tigetstr(cap)
29 if s is None:
30 s = curses.tigetnum(cap)
31 if s == -2:
32 s = curses.tigetflag(cap)
33 if s == -1:
34 # default to empty string so tparm doesn't fail
35 s = ''
36 else:
37 s = bool(s)
38 cache[cap] = s
39 # if `s' is not set `curses.tparm' will throw an error if given arguments
40 if args and s:
41 return curses.tparm(s, *args)
42 else:
43 return s
44
45 def init():
46 global cache
47
48 # Detect running under Jupyter
49 try:
50 if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
51 os.environ['PWNLIB_NOTERM'] = '1'
52 os.environ['JUPYTER_DETECTED'] ='yes'
53 except NameError:
54 pass
55
56 if 'PWNLIB_NOTERM' not in os.environ:
57 # Fix for BPython
58 try:
59 curses.setupterm()
60 except curses.error as e:
61 import traceback
62 print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)
63
64 cache = {}
65 # Manually add reset sequence into the cache.
66 # Can't look it up using tigetstr.
67 cache['reset'] = '\x1b[m'
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/term/unix_termcap.py b/pwnlib/term/unix_termcap.py
--- a/pwnlib/term/unix_termcap.py
+++ b/pwnlib/term/unix_termcap.py
@@ -60,6 +60,8 @@
except curses.error as e:
import traceback
print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)
+ print('Terminal features will not be available. Consider setting TERM variable to your current terminal name (or xterm).', file=sys.stderr)
+ os.environ['PWNLIB_NOTERM'] = '1'
cache = {}
# Manually add reset sequence into the cache.
|
{"golden_diff": "diff --git a/pwnlib/term/unix_termcap.py b/pwnlib/term/unix_termcap.py\n--- a/pwnlib/term/unix_termcap.py\n+++ b/pwnlib/term/unix_termcap.py\n@@ -60,6 +60,8 @@\n except curses.error as e:\n import traceback\n print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)\n+ print('Terminal features will not be available. Consider setting TERM variable to your current terminal name (or xterm).', file=sys.stderr)\n+ os.environ['PWNLIB_NOTERM'] = '1'\n \n cache = {}\n # Manually add reset sequence into the cache.\n", "issue": "Fatal error when $TERM is not set\nI am trying to use `pwntools` in a SageMath script. This works great interactively, but when I run my script with `sage foo.sage`, it fails somewhere in terminal code. I have traced this back to sage unsetting $TERM in non-interactive calls due to https://trac.sagemath.org/ticket/12263. Thus, issue can be easily reproduced without SageMath:\r\n\r\n```sh\r\n~$ docker run -it pwntools/pwntools:stable\r\npwntools@bce19e99e965:~$ TERM= python -c 'import pwn'\r\nWarning: error: setupterm: could not find terminfo database\r\n\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python2.7/dist-packages/pwn/__init__.py\", line 4, in <module>\r\n from pwn.toplevel import *\r\n File \"/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py\", line 20, in <module>\r\n import pwnlib\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py\", line 43, in <module>\r\n importlib.import_module('.%s' % module, 'pwnlib')\r\n File \"/usr/lib/python2.7/importlib/__init__.py\", line 37, in import_module\r\n __import__(name)\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/args.py\", line 62, in <module>\r\n from pwnlib import term\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py\", line 6, in <module>\r\n from pwnlib.term import completer\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py\", line 7, in <module>\r\n from pwnlib.term import readline\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py\", line 13, in <module>\r\n from pwnlib.term import text\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 126, in <module>\r\n sys.modules[__name__] = Module()\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 49, in __init__\r\n s = termcap.get(y)\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/unix_termcap.py\", line 28, in get\r\n s = curses.tigetstr(cap)\r\n_curses.error: must call (at least) setupterm() first\r\n```\n", "before_files": [{"content": "from __future__ import division\nfrom __future__ import print_function\n\n__all__ = ['get']\nimport curses\nimport os\nimport sys\n\ncache = None\n\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n # Hack for readthedocs.org\n if 'READTHEDOCS' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache is None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s is None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n return 
s\n\ndef init():\n global cache\n\n # Detect running under Jupyter\n try:\n if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':\n os.environ['PWNLIB_NOTERM'] = '1'\n os.environ['JUPYTER_DETECTED'] ='yes'\n except NameError:\n pass\n\n if 'PWNLIB_NOTERM' not in os.environ:\n # Fix for BPython\n try:\n curses.setupterm()\n except curses.error as e:\n import traceback\n print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)\n\n cache = {}\n # Manually add reset sequence into the cache.\n # Can't look it up using tigetstr.\n cache['reset'] = '\\x1b[m'\n", "path": "pwnlib/term/unix_termcap.py"}], "after_files": [{"content": "from __future__ import division\nfrom __future__ import print_function\n\n__all__ = ['get']\nimport curses\nimport os\nimport sys\n\ncache = None\n\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n # Hack for readthedocs.org\n if 'READTHEDOCS' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache is None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s is None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n return s\n\ndef init():\n global cache\n\n # Detect running under Jupyter\n try:\n if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':\n os.environ['PWNLIB_NOTERM'] = '1'\n os.environ['JUPYTER_DETECTED'] ='yes'\n except NameError:\n pass\n\n if 'PWNLIB_NOTERM' not in os.environ:\n # Fix for BPython\n try:\n curses.setupterm()\n except curses.error as e:\n import traceback\n print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)\n print('Terminal features will not be available. Consider setting TERM variable to your current terminal name (or xterm).', file=sys.stderr)\n os.environ['PWNLIB_NOTERM'] = '1'\n\n cache = {}\n # Manually add reset sequence into the cache.\n # Can't look it up using tigetstr.\n cache['reset'] = '\\x1b[m'\n", "path": "pwnlib/term/unix_termcap.py"}]}
| 1,463 | 159 |
gh_patches_debug_30175
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-480
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI package missing information
Looking at https://pypi.org/project/ptvsd/4.0.0a1/ we are currently missing:
* link to GitHub
* long description
* specific classifiers
* any mention of VS Code
* link to doc pages
Would be nice to get these added.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See LICENSE in the project root
5 # for license information.
6
7 import os
8 import os.path
9 import subprocess
10 import sys
11
12 from setuptools import setup
13
14 import versioneer
15 import ptvsd
16 import ptvsd._vendored
17
18
19 PYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')
20 PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
21
22
23 def cython_build():
24 print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
25 subprocess.call([
26 sys.executable,
27 os.path.join(PYDEVD_ROOT, 'setup_cython.py'),
28 'build_ext',
29 '-i',
30 ])
31
32
33 def iter_vendored_files():
34 # Add pydevd files as data files for this package. They are not
35 # treated as a package of their own, because we don't actually
36 # want to provide pydevd - just use our own copy internally.
37 for project in ptvsd._vendored.list_all():
38 for filename in ptvsd._vendored.iter_packaging_files(project):
39 yield filename
40
41
42 if __name__ == '__main__':
43 if not os.getenv('SKIP_CYTHON_BUILD'):
44 cython_build()
45
46 setup(
47 name='ptvsd',
48 version=versioneer.get_version(),
49 description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
50 #long_description=open('DESCRIPTION.md').read(),
51 #long_description_content_type='text/markdown',
52 license='MIT',
53 author='Microsoft Corporation',
54 author_email='[email protected]',
55 url='https://aka.ms/ptvs',
56 classifiers=[
57 'Development Status :: 3 - Alpha',
58 'Programming Language :: Python',
59 'Programming Language :: Python :: 2',
60 'Programming Language :: Python :: 3',
61 'License :: OSI Approved :: MIT License',
62 ],
63 packages=[
64 'ptvsd',
65 'ptvsd._vendored',
66 ],
67 package_data={
68 'ptvsd': ['ThirdPartyNotices.txt'],
69 'ptvsd._vendored': list(iter_vendored_files()),
70 },
71 cmdclass=versioneer.get_cmdclass(),
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,10 @@
yield filename
+with open('DESCRIPTION.md', 'r') as fh:
+ long_description = fh.read()
+
+
if __name__ == '__main__':
if not os.getenv('SKIP_CYTHON_BUILD'):
cython_build()
@@ -47,17 +51,22 @@
name='ptvsd',
version=versioneer.get_version(),
description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
- #long_description=open('DESCRIPTION.md').read(),
- #long_description_content_type='text/markdown',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://aka.ms/ptvs',
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
classifiers=[
'Development Status :: 3 - Alpha',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Topic :: Software Development :: Debuggers',
+ 'Operating System :: OS Independent',
+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',
'License :: OSI Approved :: MIT License',
],
packages=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,6 +39,10 @@\n yield filename\n \n \n+with open('DESCRIPTION.md', 'r') as fh:\n+ long_description = fh.read()\n+\n+\n if __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n@@ -47,17 +51,22 @@\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n- #long_description=open('DESCRIPTION.md').read(),\n- #long_description_content_type='text/markdown',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 3',\n+ 'Programming Language :: Python :: 2.7',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n+ 'Topic :: Software Development :: Debuggers',\n+ 'Operating System :: OS Independent',\n+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n", "issue": "PyPI package missing information\nLooking at https://pypi.org/project/ptvsd/4.0.0a1/ we are currently missing:\r\n* link to GitHub\r\n* long description\r\n* specific classifiers\r\n* any mention of VS Code\r\n* link to doc pages\r\n\r\nWould be nice to get these added.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. 
They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n #long_description=open('DESCRIPTION.md').read(),\n #long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nwith open('DESCRIPTION.md', 'r') as fh:\n long_description = fh.read()\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Debuggers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}]}
| 972 | 400 |
gh_patches_debug_18596
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-647
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mailto links not correctly interpreted
Hi,
I've noticed "mailto" links are not correctly interpreted.
- This code:
```
Contact us at <foo@bar>
```
- Renders the following link:
http://localhost//mailto:foo@bar
---
- This code:
```
You can [contact us](foo@bar)
```
- Renders the following link:
http://localhost/page/permalink/foo@bar
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/relative_path_ext.py`
Content:
```
1 """
2 # Relative Path Markdown Extension
3
4 During the MkDocs build we rewrite URLs that link to local
5 Markdown or media files. Using the following pages configuration
6 we can look at how the output is changed.
7
8 pages:
9 - ['index.md']
10 - ['tutorial/install.md']
11 - ['tutorial/intro.md']
12
13 ## Markdown URLs
14
15 When linking from `install.md` to `intro.md` the link would
16 simply be `[intro](intro.md)`. However, when we build
17 `install.md` we place it in a directory to create nicer URLs.
18 This means that the path to `intro.md` becomes `../intro/`
19
20 ## Media URLs
21
22 To make it easier to work with media files and store them all
23 under one directory we re-write those to all be based on the
24 root. So, with the following markdown to add an image.
25
26 
27
28 The output would depend on the location of the Markdown file it
29 was added too.
30
31 Source file | Generated Path | Image Path |
32 ------------------- | ----------------- | ---------------------------- |
33 index.md | / | ./img/initial-layout.png |
34 tutorial/install.md | tutorial/install/ | ../img/initial-layout.png |
35 tutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |
36
37 """
38
39 from __future__ import unicode_literals
40
41 import logging
42 import os
43
44 from markdown.extensions import Extension
45 from markdown.treeprocessors import Treeprocessor
46
47 from mkdocs import utils
48 from mkdocs.exceptions import MarkdownNotFound
49
50 log = logging.getLogger(__name__)
51
52
53 def _iter(node):
54 # TODO: Remove when dropping Python 2.6. Replace this
55 # function call with note.iter()
56 return [node] + node.findall('.//*')
57
58
59 def path_to_url(url, nav, strict):
60
61 scheme, netloc, path, params, query, fragment = (
62 utils.urlparse(url))
63
64 if scheme or netloc or not path:
65 # Ignore URLs unless they are a relative link to a markdown file.
66 return url
67
68 if nav and not utils.is_markdown_file(path):
69 path = utils.create_relative_media_url(nav, path)
70 elif nav:
71 # If the site navigation has been provided, then validate
72 # the internal hyperlink, making sure the target actually exists.
73 target_file = nav.file_context.make_absolute(path)
74
75 if target_file.startswith(os.path.sep):
76 target_file = target_file[1:]
77
78 if target_file not in nav.source_files:
79 source_file = nav.file_context.current_file
80 msg = (
81 'The page "%s" contained a hyperlink to "%s" which '
82 'is not listed in the "pages" configuration.'
83 ) % (source_file, target_file)
84
85 # In strict mode raise an error at this point.
86 if strict:
87 raise MarkdownNotFound(msg)
88 # Otherwise, when strict mode isn't enabled, log a warning
89 # to the user and leave the URL as it is.
90 log.warning(msg)
91 return url
92 path = utils.get_url_path(target_file, nav.use_directory_urls)
93 path = nav.url_context.make_relative(path)
94 else:
95 path = utils.get_url_path(path).lstrip('/')
96
97 # Convert the .md hyperlink to a relative hyperlink to the HTML page.
98 fragments = (scheme, netloc, path, params, query, fragment)
99 url = utils.urlunparse(fragments)
100 return url
101
102
103 class RelativePathTreeprocessor(Treeprocessor):
104
105 def __init__(self, site_navigation, strict):
106 self.site_navigation = site_navigation
107 self.strict = strict
108
109 def run(self, root):
110 """Update urls on anchors and images to make them relative
111
112 Iterates through the full document tree looking for specific
113 tags and then makes them relative based on the site navigation
114 """
115
116 for element in _iter(root):
117
118 if element.tag == 'a':
119 key = 'href'
120 elif element.tag == 'img':
121 key = 'src'
122 else:
123 continue
124
125 url = element.get(key)
126 new_url = path_to_url(url, self.site_navigation, self.strict)
127 element.set(key, new_url)
128
129 return root
130
131
132 class RelativePathExtension(Extension):
133 """
134 The Extension class is what we pass to markdown, it then
135 registers the Treeprocessor.
136 """
137
138 def __init__(self, site_navigation, strict):
139 self.site_navigation = site_navigation
140 self.strict = strict
141
142 def extendMarkdown(self, md, md_globals):
143 relpath = RelativePathTreeprocessor(self.site_navigation, self.strict)
144 md.treeprocessors.add("relpath", relpath, "_end")
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/relative_path_ext.py b/mkdocs/relative_path_ext.py
--- a/mkdocs/relative_path_ext.py
+++ b/mkdocs/relative_path_ext.py
@@ -43,6 +43,7 @@
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
+from markdown.util import AMP_SUBSTITUTE
from mkdocs import utils
from mkdocs.exceptions import MarkdownNotFound
@@ -61,8 +62,10 @@
scheme, netloc, path, params, query, fragment = (
utils.urlparse(url))
- if scheme or netloc or not path:
+ if scheme or netloc or not path or AMP_SUBSTITUTE in url:
# Ignore URLs unless they are a relative link to a markdown file.
+ # AMP_SUBSTITUTE is used internally by Markdown only for email,which is
+ # not a relative link. As urlparse errors on them, skip explicitly
return url
if nav and not utils.is_markdown_file(path):
|
{"golden_diff": "diff --git a/mkdocs/relative_path_ext.py b/mkdocs/relative_path_ext.py\n--- a/mkdocs/relative_path_ext.py\n+++ b/mkdocs/relative_path_ext.py\n@@ -43,6 +43,7 @@\n \n from markdown.extensions import Extension\n from markdown.treeprocessors import Treeprocessor\n+from markdown.util import AMP_SUBSTITUTE\n \n from mkdocs import utils\n from mkdocs.exceptions import MarkdownNotFound\n@@ -61,8 +62,10 @@\n scheme, netloc, path, params, query, fragment = (\n utils.urlparse(url))\n \n- if scheme or netloc or not path:\n+ if scheme or netloc or not path or AMP_SUBSTITUTE in url:\n # Ignore URLs unless they are a relative link to a markdown file.\n+ # AMP_SUBSTITUTE is used internally by Markdown only for email,which is\n+ # not a relative link. As urlparse errors on them, skip explicitly\n return url\n \n if nav and not utils.is_markdown_file(path):\n", "issue": "Mailto links not correctly interpreted\nHi, \nI've noticed \"mailto\" links are not correctly interpreted.\n- This code:\n\n```\nContact us at <foo@bar>\n```\n- Renders the following link: \n http://localhost//mailto:foo@bar \n\n---\n- This code:\n\n```\nYou can [contact us](foo@bar)\n```\n- Renders the following link: \n http://localhost/page/permalink/foo@bar\n\n", "before_files": [{"content": "\"\"\"\n# Relative Path Markdown Extension\n\nDuring the MkDocs build we rewrite URLs that link to local\nMarkdown or media files. Using the following pages configuration\nwe can look at how the output is changed.\n\n pages:\n - ['index.md']\n - ['tutorial/install.md']\n - ['tutorial/intro.md']\n\n## Markdown URLs\n\nWhen linking from `install.md` to `intro.md` the link would\nsimply be `[intro](intro.md)`. However, when we build\n`install.md` we place it in a directory to create nicer URLs.\nThis means that the path to `intro.md` becomes `../intro/`\n\n## Media URLs\n\nTo make it easier to work with media files and store them all\nunder one directory we re-write those to all be based on the\nroot. So, with the following markdown to add an image.\n\n \n\nThe output would depend on the location of the Markdown file it\nwas added too.\n\nSource file | Generated Path | Image Path |\n------------------- | ----------------- | ---------------------------- |\nindex.md | / | ./img/initial-layout.png |\ntutorial/install.md | tutorial/install/ | ../img/initial-layout.png |\ntutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\n\nfrom mkdocs import utils\nfrom mkdocs.exceptions import MarkdownNotFound\n\nlog = logging.getLogger(__name__)\n\n\ndef _iter(node):\n # TODO: Remove when dropping Python 2.6. 
Replace this\n # function call with note.iter()\n return [node] + node.findall('.//*')\n\n\ndef path_to_url(url, nav, strict):\n\n scheme, netloc, path, params, query, fragment = (\n utils.urlparse(url))\n\n if scheme or netloc or not path:\n # Ignore URLs unless they are a relative link to a markdown file.\n return url\n\n if nav and not utils.is_markdown_file(path):\n path = utils.create_relative_media_url(nav, path)\n elif nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = nav.file_context.make_absolute(path)\n\n if target_file.startswith(os.path.sep):\n target_file = target_file[1:]\n\n if target_file not in nav.source_files:\n source_file = nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n ) % (source_file, target_file)\n\n # In strict mode raise an error at this point.\n if strict:\n raise MarkdownNotFound(msg)\n # Otherwise, when strict mode isn't enabled, log a warning\n # to the user and leave the URL as it is.\n log.warning(msg)\n return url\n path = utils.get_url_path(target_file, nav.use_directory_urls)\n path = nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n fragments = (scheme, netloc, path, params, query, fragment)\n url = utils.urlunparse(fragments)\n return url\n\n\nclass RelativePathTreeprocessor(Treeprocessor):\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def run(self, root):\n \"\"\"Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n\n for element in _iter(root):\n\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = path_to_url(url, self.site_navigation, self.strict)\n element.set(key, new_url)\n\n return root\n\n\nclass RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def extendMarkdown(self, md, md_globals):\n relpath = RelativePathTreeprocessor(self.site_navigation, self.strict)\n md.treeprocessors.add(\"relpath\", relpath, \"_end\")\n", "path": "mkdocs/relative_path_ext.py"}], "after_files": [{"content": "\"\"\"\n# Relative Path Markdown Extension\n\nDuring the MkDocs build we rewrite URLs that link to local\nMarkdown or media files. Using the following pages configuration\nwe can look at how the output is changed.\n\n pages:\n - ['index.md']\n - ['tutorial/install.md']\n - ['tutorial/intro.md']\n\n## Markdown URLs\n\nWhen linking from `install.md` to `intro.md` the link would\nsimply be `[intro](intro.md)`. However, when we build\n`install.md` we place it in a directory to create nicer URLs.\nThis means that the path to `intro.md` becomes `../intro/`\n\n## Media URLs\n\nTo make it easier to work with media files and store them all\nunder one directory we re-write those to all be based on the\nroot. 
So, with the following markdown to add an image.\n\n \n\nThe output would depend on the location of the Markdown file it\nwas added too.\n\nSource file | Generated Path | Image Path |\n------------------- | ----------------- | ---------------------------- |\nindex.md | / | ./img/initial-layout.png |\ntutorial/install.md | tutorial/install/ | ../img/initial-layout.png |\ntutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs import utils\nfrom mkdocs.exceptions import MarkdownNotFound\n\nlog = logging.getLogger(__name__)\n\n\ndef _iter(node):\n # TODO: Remove when dropping Python 2.6. Replace this\n # function call with note.iter()\n return [node] + node.findall('.//*')\n\n\ndef path_to_url(url, nav, strict):\n\n scheme, netloc, path, params, query, fragment = (\n utils.urlparse(url))\n\n if scheme or netloc or not path or AMP_SUBSTITUTE in url:\n # Ignore URLs unless they are a relative link to a markdown file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email,which is\n # not a relative link. As urlparse errors on them, skip explicitly\n return url\n\n if nav and not utils.is_markdown_file(path):\n path = utils.create_relative_media_url(nav, path)\n elif nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = nav.file_context.make_absolute(path)\n\n if target_file.startswith(os.path.sep):\n target_file = target_file[1:]\n\n if target_file not in nav.source_files:\n source_file = nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n ) % (source_file, target_file)\n\n # In strict mode raise an error at this point.\n if strict:\n raise MarkdownNotFound(msg)\n # Otherwise, when strict mode isn't enabled, log a warning\n # to the user and leave the URL as it is.\n log.warning(msg)\n return url\n path = utils.get_url_path(target_file, nav.use_directory_urls)\n path = nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n fragments = (scheme, netloc, path, params, query, fragment)\n url = utils.urlunparse(fragments)\n return url\n\n\nclass RelativePathTreeprocessor(Treeprocessor):\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def run(self, root):\n \"\"\"Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n\n for element in _iter(root):\n\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = path_to_url(url, self.site_navigation, self.strict)\n element.set(key, new_url)\n\n return root\n\n\nclass RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def extendMarkdown(self, md, md_globals):\n relpath = 
RelativePathTreeprocessor(self.site_navigation, self.strict)\n md.treeprocessors.add(\"relpath\", relpath, \"_end\")\n", "path": "mkdocs/relative_path_ext.py"}]}
| 1,712 | 228 |
gh_patches_debug_30640
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
During handling of the above exception, another exception occurred
In the examples/exception_monitoring.py, when i try to run the app and hit the default route, it prints out the exception and then it calls "return super.default(self, request, exception)", it returns with the following exception:
AttributeError: type object 'super' has no attribute 'default'
looks like in the exceptions.py, this line is the culprit (i could be wrong):
`handler = self.handlers.get(type(exception), self.default)
`
since __init__ does not have that attribute defined. I am running python 3.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/exception_monitoring.py`
Content:
```
1 """
2 Example intercepting uncaught exceptions using Sanic's error handler framework.
3
4 This may be useful for developers wishing to use Sentry, Airbrake, etc.
5 or a custom system to log and monitor unexpected errors in production.
6
7 First we create our own class inheriting from Handler in sanic.exceptions,
8 and pass in an instance of it when we create our Sanic instance. Inside this
9 class' default handler, we can do anything including sending exceptions to
10 an external service.
11 """
12
13
14
15 """
16 Imports and code relevant for our CustomHandler class
17 (Ordinarily this would be in a separate file)
18 """
19 from sanic.response import text
20 from sanic.exceptions import Handler, SanicException
21
22 class CustomHandler(Handler):
23 def default(self, request, exception):
24 # Here, we have access to the exception object
25 # and can do anything with it (log, send to external service, etc)
26
27 # Some exceptions are trivial and built into Sanic (404s, etc)
28 if not issubclass(type(exception), SanicException):
29 print(exception)
30
31 # Then, we must finish handling the exception by returning
32 # our response to the client
33 # For this we can just call the super class' default handler
34 return super.default(self, request, exception)
35
36
37
38
39 """
40 This is an ordinary Sanic server, with the exception that we set the
41 server's error_handler to an instance of our CustomHandler
42 """
43
44 from sanic import Sanic
45 from sanic.response import json
46
47 app = Sanic(__name__)
48
49 handler = CustomHandler(sanic=app)
50 app.error_handler = handler
51
52 @app.route("/")
53 async def test(request):
54 # Here, something occurs which causes an unexpected exception
55 # This exception will flow to our custom handler.
56 x = 1 / 0
57 return json({"test": True})
58
59
60 app.run(host="0.0.0.0", port=8000, debug=True)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/exception_monitoring.py b/examples/exception_monitoring.py
--- a/examples/exception_monitoring.py
+++ b/examples/exception_monitoring.py
@@ -9,17 +9,15 @@
class' default handler, we can do anything including sending exceptions to
an external service.
"""
-
-
-
+from sanic.exceptions import Handler, SanicException
"""
Imports and code relevant for our CustomHandler class
(Ordinarily this would be in a separate file)
"""
-from sanic.response import text
-from sanic.exceptions import Handler, SanicException
+
class CustomHandler(Handler):
+
def default(self, request, exception):
# Here, we have access to the exception object
# and can do anything with it (log, send to external service, etc)
@@ -31,9 +29,7 @@
# Then, we must finish handling the exception by returning
# our response to the client
# For this we can just call the super class' default handler
- return super.default(self, request, exception)
-
-
+ return super().default(request, exception)
"""
@@ -49,11 +45,12 @@
handler = CustomHandler(sanic=app)
app.error_handler = handler
+
@app.route("/")
async def test(request):
# Here, something occurs which causes an unexpected exception
# This exception will flow to our custom handler.
- x = 1 / 0
+ 1 / 0
return json({"test": True})
|
{"golden_diff": "diff --git a/examples/exception_monitoring.py b/examples/exception_monitoring.py\n--- a/examples/exception_monitoring.py\n+++ b/examples/exception_monitoring.py\n@@ -9,17 +9,15 @@\n class' default handler, we can do anything including sending exceptions to\n an external service.\n \"\"\"\n-\n-\n-\n+from sanic.exceptions import Handler, SanicException\n \"\"\"\n Imports and code relevant for our CustomHandler class\n (Ordinarily this would be in a separate file)\n \"\"\"\n-from sanic.response import text\n-from sanic.exceptions import Handler, SanicException\n+\n \n class CustomHandler(Handler):\n+\n def default(self, request, exception):\n # Here, we have access to the exception object\n # and can do anything with it (log, send to external service, etc)\n@@ -31,9 +29,7 @@\n # Then, we must finish handling the exception by returning\n # our response to the client\n # For this we can just call the super class' default handler\n- return super.default(self, request, exception)\n-\n-\n+ return super().default(request, exception)\n \n \n \"\"\"\n@@ -49,11 +45,12 @@\n handler = CustomHandler(sanic=app)\n app.error_handler = handler\n \n+\n @app.route(\"/\")\n async def test(request):\n # Here, something occurs which causes an unexpected exception\n # This exception will flow to our custom handler.\n- x = 1 / 0\n+ 1 / 0\n return json({\"test\": True})\n", "issue": "During handling of the above exception, another exception occurred\nIn the examples/exception_monitoring.py, when i try to run the app and hit the default route, it prints out the exception and then it calls \"return super.default(self, request, exception)\", it returns with the following exception:\r\n\r\nAttributeError: type object 'super' has no attribute 'default'\r\n\r\nlooks like in the exceptions.py, this line is the culprit (i could be wrong):\r\n\r\n`handler = self.handlers.get(type(exception), self.default)\r\n`\r\n since __init__ does not have that attribute defined. I am running python 3.6\n", "before_files": [{"content": "\"\"\"\nExample intercepting uncaught exceptions using Sanic's error handler framework.\n\nThis may be useful for developers wishing to use Sentry, Airbrake, etc.\nor a custom system to log and monitor unexpected errors in production.\n\nFirst we create our own class inheriting from Handler in sanic.exceptions,\nand pass in an instance of it when we create our Sanic instance. 
Inside this\nclass' default handler, we can do anything including sending exceptions to\nan external service.\n\"\"\"\n\n\n\n\"\"\"\nImports and code relevant for our CustomHandler class\n(Ordinarily this would be in a separate file)\n\"\"\"\nfrom sanic.response import text\nfrom sanic.exceptions import Handler, SanicException\n\nclass CustomHandler(Handler):\n def default(self, request, exception):\n # Here, we have access to the exception object\n # and can do anything with it (log, send to external service, etc)\n\n # Some exceptions are trivial and built into Sanic (404s, etc)\n if not issubclass(type(exception), SanicException):\n print(exception)\n\n # Then, we must finish handling the exception by returning\n # our response to the client\n # For this we can just call the super class' default handler\n return super.default(self, request, exception)\n\n\n\n\n\"\"\"\nThis is an ordinary Sanic server, with the exception that we set the\nserver's error_handler to an instance of our CustomHandler\n\"\"\"\n\nfrom sanic import Sanic\nfrom sanic.response import json\n\napp = Sanic(__name__)\n\nhandler = CustomHandler(sanic=app)\napp.error_handler = handler\n\[email protected](\"/\")\nasync def test(request):\n # Here, something occurs which causes an unexpected exception\n # This exception will flow to our custom handler.\n x = 1 / 0\n return json({\"test\": True})\n\n\napp.run(host=\"0.0.0.0\", port=8000, debug=True)\n", "path": "examples/exception_monitoring.py"}], "after_files": [{"content": "\"\"\"\nExample intercepting uncaught exceptions using Sanic's error handler framework.\n\nThis may be useful for developers wishing to use Sentry, Airbrake, etc.\nor a custom system to log and monitor unexpected errors in production.\n\nFirst we create our own class inheriting from Handler in sanic.exceptions,\nand pass in an instance of it when we create our Sanic instance. Inside this\nclass' default handler, we can do anything including sending exceptions to\nan external service.\n\"\"\"\nfrom sanic.exceptions import Handler, SanicException\n\"\"\"\nImports and code relevant for our CustomHandler class\n(Ordinarily this would be in a separate file)\n\"\"\"\n\n\nclass CustomHandler(Handler):\n\n def default(self, request, exception):\n # Here, we have access to the exception object\n # and can do anything with it (log, send to external service, etc)\n\n # Some exceptions are trivial and built into Sanic (404s, etc)\n if not issubclass(type(exception), SanicException):\n print(exception)\n\n # Then, we must finish handling the exception by returning\n # our response to the client\n # For this we can just call the super class' default handler\n return super().default(request, exception)\n\n\n\"\"\"\nThis is an ordinary Sanic server, with the exception that we set the\nserver's error_handler to an instance of our CustomHandler\n\"\"\"\n\nfrom sanic import Sanic\nfrom sanic.response import json\n\napp = Sanic(__name__)\n\nhandler = CustomHandler(sanic=app)\napp.error_handler = handler\n\n\[email protected](\"/\")\nasync def test(request):\n # Here, something occurs which causes an unexpected exception\n # This exception will flow to our custom handler.\n 1 / 0\n return json({\"test\": True})\n\n\napp.run(host=\"0.0.0.0\", port=8000, debug=True)\n", "path": "examples/exception_monitoring.py"}]}
| 918 | 333 |
gh_patches_debug_13210
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-7862
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Calendar link in calendar not working properly
In the description, "Join using link: https://eventyay.com/e/fa96ae2c/video/Main Hall/187"
The space is treated as a break, clipping the link at Main"
"Main Hall" should be HTTP encoded to Main%20Hall
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/calendar/ical.py`
Content:
```
1 import pytz
2 from flask import jsonify
3 from flask_jwt_extended import current_user
4 from icalendar import Calendar, Event
5 from sqlalchemy import or_
6 from sqlalchemy.orm import joinedload
7
8 from app.models.session import Session
9
10
11 def to_ical(event, include_sessions=False, my_schedule=False, user_id=None):
12 cal = Calendar()
13 cal.add('version', '2.0')
14 cal.add('METHOD', 'PUBLISH')
15 cal.add('X-WR-CALNAME', event.name)
16 cal.add('X-WR-CALDESC', 'Event Calendar')
17
18 event_component = Event()
19 event_component.add('uid', event.identifier)
20 event_component.add('summary', event.name)
21 event_component.add('url', event.site_link)
22 event_component.add('dtstart', event.starts_at_tz)
23 event_component.add('dtend', event.ends_at_tz)
24 event_component.add('location', event.normalized_location)
25 event_component.add('description', event.description)
26 if event.has_coordinates:
27 event_component.add('geo', (event.latitude, event.longitude))
28 if event.owner_description:
29 event_component.add('organizer', event.owner_description)
30
31 cal.add_component(event_component)
32
33 if include_sessions:
34 sessions_query = (
35 Session.query.filter_by(event_id=event.id)
36 .options(joinedload(Session.microlocation))
37 .filter_by(deleted_at=None)
38 .filter(or_(Session.state == 'accepted', Session.state == 'confirmed'))
39 .order_by(Session.starts_at.asc())
40 )
41 if my_schedule:
42 if not (current_user or user_id):
43 return jsonify(error='Login or User ID required'), 401
44
45 user_id = user_id or current_user.id
46 sessions_query = sessions_query.join(Session.favourites).filter_by(
47 user_id=user_id
48 )
49 sessions = sessions_query.all()
50
51 for session in sessions:
52
53 if not (session and session.starts_at and session.ends_at):
54 continue
55
56 session_video_url = (
57 " "
58 + event.site_link
59 + '/video/'
60 + session.microlocation.video_stream.name
61 + "/"
62 + str(session.microlocation.video_stream.id)
63 if session.microlocation.video_stream
64 else ""
65 )
66 session_link_heading = (
67 "Join using link: " + session_video_url + "<br/>"
68 if session_video_url
69 else ""
70 )
71 session_description = (
72 " "
73 + "Room: "
74 + session.microlocation.name
75 + "<br/>"
76 + session_link_heading
77 + "<br/>"
78 + session.short_abstract
79 )
80 session_component = Event()
81 session_component.add('summary', session.title)
82 session_component.add('uid', str(session.id) + "-" + event.identifier)
83 session_component.add('geo', (event.latitude, event.longitude))
84 session_component.add(
85 'location',
86 session_video_url
87 or (session.microlocation and session.microlocation.name)
88 or '' + " " + event.location_name,
89 )
90 session_component.add(
91 'dtstart', session.starts_at.astimezone(pytz.timezone(event.timezone))
92 )
93 session_component.add(
94 'dtend', session.ends_at.astimezone(pytz.timezone(event.timezone))
95 )
96 session_component.add('description', session_description)
97 session_component.add('url', event.site_link + '/session/' + str(session.id))
98
99 cal.add_component(session_component)
100
101 return cal.to_ical()
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/calendar/ical.py b/app/api/helpers/calendar/ical.py
--- a/app/api/helpers/calendar/ical.py
+++ b/app/api/helpers/calendar/ical.py
@@ -1,3 +1,5 @@
+from urllib.parse import quote
+
import pytz
from flask import jsonify
from flask_jwt_extended import current_user
@@ -57,7 +59,7 @@
" "
+ event.site_link
+ '/video/'
- + session.microlocation.video_stream.name
+ + quote(session.microlocation.video_stream.name)
+ "/"
+ str(session.microlocation.video_stream.id)
if session.microlocation.video_stream
|
{"golden_diff": "diff --git a/app/api/helpers/calendar/ical.py b/app/api/helpers/calendar/ical.py\n--- a/app/api/helpers/calendar/ical.py\n+++ b/app/api/helpers/calendar/ical.py\n@@ -1,3 +1,5 @@\n+from urllib.parse import quote\n+\n import pytz\n from flask import jsonify\n from flask_jwt_extended import current_user\n@@ -57,7 +59,7 @@\n \" \"\n + event.site_link\n + '/video/'\n- + session.microlocation.video_stream.name\n+ + quote(session.microlocation.video_stream.name)\n + \"/\"\n + str(session.microlocation.video_stream.id)\n if session.microlocation.video_stream\n", "issue": "Calendar link in calendar not working properly\nIn the description, \"Join using link: https://eventyay.com/e/fa96ae2c/video/Main Hall/187\"\r\n\r\nThe space is treated as a break, clipping the link at Main\"\r\n\r\n\"Main Hall\" should be HTTP encoded to Main%20Hall\n", "before_files": [{"content": "import pytz\nfrom flask import jsonify\nfrom flask_jwt_extended import current_user\nfrom icalendar import Calendar, Event\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import joinedload\n\nfrom app.models.session import Session\n\n\ndef to_ical(event, include_sessions=False, my_schedule=False, user_id=None):\n cal = Calendar()\n cal.add('version', '2.0')\n cal.add('METHOD', 'PUBLISH')\n cal.add('X-WR-CALNAME', event.name)\n cal.add('X-WR-CALDESC', 'Event Calendar')\n\n event_component = Event()\n event_component.add('uid', event.identifier)\n event_component.add('summary', event.name)\n event_component.add('url', event.site_link)\n event_component.add('dtstart', event.starts_at_tz)\n event_component.add('dtend', event.ends_at_tz)\n event_component.add('location', event.normalized_location)\n event_component.add('description', event.description)\n if event.has_coordinates:\n event_component.add('geo', (event.latitude, event.longitude))\n if event.owner_description:\n event_component.add('organizer', event.owner_description)\n\n cal.add_component(event_component)\n\n if include_sessions:\n sessions_query = (\n Session.query.filter_by(event_id=event.id)\n .options(joinedload(Session.microlocation))\n .filter_by(deleted_at=None)\n .filter(or_(Session.state == 'accepted', Session.state == 'confirmed'))\n .order_by(Session.starts_at.asc())\n )\n if my_schedule:\n if not (current_user or user_id):\n return jsonify(error='Login or User ID required'), 401\n\n user_id = user_id or current_user.id\n sessions_query = sessions_query.join(Session.favourites).filter_by(\n user_id=user_id\n )\n sessions = sessions_query.all()\n\n for session in sessions:\n\n if not (session and session.starts_at and session.ends_at):\n continue\n\n session_video_url = (\n \" \"\n + event.site_link\n + '/video/'\n + session.microlocation.video_stream.name\n + \"/\"\n + str(session.microlocation.video_stream.id)\n if session.microlocation.video_stream\n else \"\"\n )\n session_link_heading = (\n \"Join using link: \" + session_video_url + \"<br/>\"\n if session_video_url\n else \"\"\n )\n session_description = (\n \" \"\n + \"Room: \"\n + session.microlocation.name\n + \"<br/>\"\n + session_link_heading\n + \"<br/>\"\n + session.short_abstract\n )\n session_component = Event()\n session_component.add('summary', session.title)\n session_component.add('uid', str(session.id) + \"-\" + event.identifier)\n session_component.add('geo', (event.latitude, event.longitude))\n session_component.add(\n 'location',\n session_video_url\n or (session.microlocation and session.microlocation.name)\n or '' + \" \" + event.location_name,\n )\n 
session_component.add(\n 'dtstart', session.starts_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add(\n 'dtend', session.ends_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add('description', session_description)\n session_component.add('url', event.site_link + '/session/' + str(session.id))\n\n cal.add_component(session_component)\n\n return cal.to_ical()\n", "path": "app/api/helpers/calendar/ical.py"}], "after_files": [{"content": "from urllib.parse import quote\n\nimport pytz\nfrom flask import jsonify\nfrom flask_jwt_extended import current_user\nfrom icalendar import Calendar, Event\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import joinedload\n\nfrom app.models.session import Session\n\n\ndef to_ical(event, include_sessions=False, my_schedule=False, user_id=None):\n cal = Calendar()\n cal.add('version', '2.0')\n cal.add('METHOD', 'PUBLISH')\n cal.add('X-WR-CALNAME', event.name)\n cal.add('X-WR-CALDESC', 'Event Calendar')\n\n event_component = Event()\n event_component.add('uid', event.identifier)\n event_component.add('summary', event.name)\n event_component.add('url', event.site_link)\n event_component.add('dtstart', event.starts_at_tz)\n event_component.add('dtend', event.ends_at_tz)\n event_component.add('location', event.normalized_location)\n event_component.add('description', event.description)\n if event.has_coordinates:\n event_component.add('geo', (event.latitude, event.longitude))\n if event.owner_description:\n event_component.add('organizer', event.owner_description)\n\n cal.add_component(event_component)\n\n if include_sessions:\n sessions_query = (\n Session.query.filter_by(event_id=event.id)\n .options(joinedload(Session.microlocation))\n .filter_by(deleted_at=None)\n .filter(or_(Session.state == 'accepted', Session.state == 'confirmed'))\n .order_by(Session.starts_at.asc())\n )\n if my_schedule:\n if not (current_user or user_id):\n return jsonify(error='Login or User ID required'), 401\n\n user_id = user_id or current_user.id\n sessions_query = sessions_query.join(Session.favourites).filter_by(\n user_id=user_id\n )\n sessions = sessions_query.all()\n\n for session in sessions:\n\n if not (session and session.starts_at and session.ends_at):\n continue\n\n session_video_url = (\n \" \"\n + event.site_link\n + '/video/'\n + quote(session.microlocation.video_stream.name)\n + \"/\"\n + str(session.microlocation.video_stream.id)\n if session.microlocation.video_stream\n else \"\"\n )\n session_link_heading = (\n \"Join using link: \" + session_video_url + \"<br/>\"\n if session_video_url\n else \"\"\n )\n session_description = (\n \" \"\n + \"Room: \"\n + session.microlocation.name\n + \"<br/>\"\n + session_link_heading\n + \"<br/>\"\n + session.short_abstract\n )\n session_component = Event()\n session_component.add('summary', session.title)\n session_component.add('uid', str(session.id) + \"-\" + event.identifier)\n session_component.add('geo', (event.latitude, event.longitude))\n session_component.add(\n 'location',\n session_video_url\n or (session.microlocation and session.microlocation.name)\n or '' + \" \" + event.location_name,\n )\n session_component.add(\n 'dtstart', session.starts_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add(\n 'dtend', session.ends_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add('description', session_description)\n session_component.add('url', event.site_link + '/session/' + str(session.id))\n\n cal.add_component(session_component)\n\n 
return cal.to_ical()\n", "path": "app/api/helpers/calendar/ical.py"}]}
| 1,267 | 143 |
gh_patches_debug_38935
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-833
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: New geoaccessor `geocentroid` to get the center point of points
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
```python
>>> import dtoolkit.geoaccessor
>>> import geopandas as gpd
>>> from shapely import Point
>>> df = gpd.GeoDataFrame(
... geometry=[
... Point(100, 32),
... Point(120, 50),
... Point(122, 55)
... ],
... crs=4326,
... )
>>> df
geometry
0 POINT (100.00000 32.00000)
1 POINT (120.00000 50.00000)
2 POINT (122.00000 55.00000)
>>> df.geocentroid()
<POINT (112.213 44.119)>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/geoaccessor/geoseries/geocentroid.py`
Content:
```
1 import geopandas as gpd
2 import numpy as np
3 import pandas as pd
4 from shapely import Point
5
6 from dtoolkit.geoaccessor.geoseries.geodistance import geodistance
7 from dtoolkit.geoaccessor.geoseries.xy import xy
8 from dtoolkit.geoaccessor.register import register_geoseries_method
9
10
11 @register_geoseries_method
12 def geocentroid(
13 s: gpd.GeoSeries,
14 /,
15 weights: pd.Series = None,
16 max_iter: int = 300,
17 tol: float = 1e-4,
18 ) -> Point:
19 """
20 Return the centroid of all points via the center of gravity method.
21
22 Parameters
23 ----------
24 weights : Hashable or 1d array-like, optional
25 - None : All weights will be set to 1.
26 - Hashable : Only for DataFrame, the column name.
27 - 1d array-like : The weights of each point.
28
29 max_iter : int, default 300
30 Maximum number of iterations to perform.
31
32 tol : float, default 1e-4
33 Tolerance for convergence.
34
35 Returns
36 -------
37 Point
38
39 Raises
40 ------
41 ValueError
42 If the CRS is not ``ESGP:4326``.
43
44 See Also
45 --------
46 geopandas.GeoSeries.centroid
47 dtoolkit.geoaccessor.geoseries.geocentroid
48 dtoolkit.geoaccessor.geodataframe.geocentroid
49
50 Examples
51 --------
52 >>> import dtoolkit.geoaccessor
53 >>> import geopandas as gpd
54 >>> from shapely import Point
55 >>> df = gpd.GeoDataFrame(
56 ... {
57 ... "weights": [1, 2, 3],
58 ... "geometry": [Point(100, 32), Point(120, 50), Point(122, 55)],
59 ... },
60 ... crs=4326,
61 ... )
62 >>> df
63 weights geometry
64 0 1 POINT (100.00000 32.00000)
65 1 2 POINT (120.00000 50.00000)
66 2 3 POINT (122.00000 55.00000)
67 >>> df.geocentroid()
68 <POINT (112.375 44.276)>
69
70 Set weights for each point.
71
72 >>> df.geocentroid("weights")
73 <POINT (114.516 46.675)>
74 >>> df.geocentroid([1, 2, 3])
75 <POINT (114.516 46.675)>
76 """
77
78 if s.crs != 4326:
79 raise ValueError(f"Only support 'EPSG:4326' CRS, but got {s.crs!r}.")
80
81 weights = np.asarray(weights) if weights is not None else 1
82 coord = xy(s)
83 X = coord.mean()
84 for _ in range(max_iter):
85 dis = geodistance(s, Point(*X.tolist())).mul(weights, axis=0)
86 Xt = coord.mul(dis, axis=0).sum() / dis.sum()
87
88 if ((X - Xt).abs() <= tol).all():
89 X = Xt
90 break
91
92 X = Xt
93
94 return Point(*X.tolist())
95
```
Path: `dtoolkit/geoaccessor/geodataframe/geocentroid.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Hashable
4
5 import geopandas as gpd
6 import pandas as pd
7 from pandas.util._decorators import doc
8 from shapely import Point
9
10 from dtoolkit.geoaccessor.geoseries import geocentroid as s_geocentroid
11 from dtoolkit.geoaccessor.register import register_geodataframe_method
12
13
14 @register_geodataframe_method
15 @doc(s_geocentroid)
16 def geocentroid(
17 df: gpd.GeoDataFrame,
18 /,
19 weights: Hashable | pd.Series = None,
20 max_iter: int = 300,
21 tol: float = 1e-4,
22 ) -> Point:
23 if weights is not None and isinstance(weights, Hashable):
24 weights = df[weights]
25
26 return s_geocentroid(df.geometry, weights=weights, max_iter=max_iter, tol=tol)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dtoolkit/geoaccessor/geodataframe/geocentroid.py b/dtoolkit/geoaccessor/geodataframe/geocentroid.py
--- a/dtoolkit/geoaccessor/geodataframe/geocentroid.py
+++ b/dtoolkit/geoaccessor/geodataframe/geocentroid.py
@@ -18,7 +18,7 @@
/,
weights: Hashable | pd.Series = None,
max_iter: int = 300,
- tol: float = 1e-4,
+ tol: float = 1e-5,
) -> Point:
if weights is not None and isinstance(weights, Hashable):
weights = df[weights]
diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py
--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py
+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py
@@ -14,11 +14,21 @@
/,
weights: pd.Series = None,
max_iter: int = 300,
- tol: float = 1e-4,
+ tol: float = 1e-5,
) -> Point:
- """
+ r"""
Return the centroid of all points via the center of gravity method.
+ .. math::
+
+ \left\{\begin{matrix}
+ d_i &=& D(P(\bar{x}_n, \bar{y}_n), P(x_i, y_i)) \\
+ \bar{x}_0 &=& \frac{\sum w_i x_i}{\sum w_i} \\
+ \bar{y}_0 &=& \frac{\sum w_i y_i}{\sum w_i} \\
+ \bar{x}_{n+1} &=& \frac{\sum w_i x_i / d_i}{\sum w_i / d_i} \\
+ \bar{y}_{n+1} &=& \frac{\sum w_i y_i / d_i}{\sum w_i / d_i} \\
+ \end{matrix}\right.
+
Parameters
----------
weights : Hashable or 1d array-like, optional
@@ -29,18 +39,13 @@
max_iter : int, default 300
Maximum number of iterations to perform.
- tol : float, default 1e-4
+ tol : float, default 1e-5
Tolerance for convergence.
Returns
-------
Point
- Raises
- ------
- ValueError
- If the CRS is not ``ESGP:4326``.
-
See Also
--------
geopandas.GeoSeries.centroid
@@ -65,24 +70,21 @@
1 2 POINT (120.00000 50.00000)
2 3 POINT (122.00000 55.00000)
>>> df.geocentroid()
- <POINT (112.375 44.276)>
+ <POINT (120 50)>
Set weights for each point.
>>> df.geocentroid("weights")
- <POINT (114.516 46.675)>
+ <POINT (121.999 54.998)>
>>> df.geocentroid([1, 2, 3])
- <POINT (114.516 46.675)>
+ <POINT (121.999 54.998)>
"""
- if s.crs != 4326:
- raise ValueError(f"Only support 'EPSG:4326' CRS, but got {s.crs!r}.")
-
weights = np.asarray(weights) if weights is not None else 1
coord = xy(s)
X = coord.mean()
for _ in range(max_iter):
- dis = geodistance(s, Point(*X.tolist())).mul(weights, axis=0)
+ dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)
Xt = coord.mul(dis, axis=0).sum() / dis.sum()
if ((X - Xt).abs() <= tol).all():
|
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geodataframe/geocentroid.py b/dtoolkit/geoaccessor/geodataframe/geocentroid.py\n--- a/dtoolkit/geoaccessor/geodataframe/geocentroid.py\n+++ b/dtoolkit/geoaccessor/geodataframe/geocentroid.py\n@@ -18,7 +18,7 @@\n /,\n weights: Hashable | pd.Series = None,\n max_iter: int = 300,\n- tol: float = 1e-4,\n+ tol: float = 1e-5,\n ) -> Point:\n if weights is not None and isinstance(weights, Hashable):\n weights = df[weights]\ndiff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py\n+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n@@ -14,11 +14,21 @@\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n- tol: float = 1e-4,\n+ tol: float = 1e-5,\n ) -> Point:\n- \"\"\"\n+ r\"\"\"\n Return the centroid of all points via the center of gravity method.\n \n+ .. math::\n+\n+ \\left\\{\\begin{matrix}\n+ d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n+ \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n+ \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n+ \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n+ \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n+ \\end{matrix}\\right.\n+\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n@@ -29,18 +39,13 @@\n max_iter : int, default 300\n Maximum number of iterations to perform.\n \n- tol : float, default 1e-4\n+ tol : float, default 1e-5\n Tolerance for convergence.\n \n Returns\n -------\n Point\n \n- Raises\n- ------\n- ValueError\n- If the CRS is not ``ESGP:4326``.\n-\n See Also\n --------\n geopandas.GeoSeries.centroid\n@@ -65,24 +70,21 @@\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n- <POINT (112.375 44.276)>\n+ <POINT (120 50)>\n \n Set weights for each point.\n \n >>> df.geocentroid(\"weights\")\n- <POINT (114.516 46.675)>\n+ <POINT (121.999 54.998)>\n >>> df.geocentroid([1, 2, 3])\n- <POINT (114.516 46.675)>\n+ <POINT (121.999 54.998)>\n \"\"\"\n \n- if s.crs != 4326:\n- raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {s.crs!r}.\")\n-\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mean()\n for _ in range(max_iter):\n- dis = geodistance(s, Point(*X.tolist())).mul(weights, axis=0)\n+ dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n \n if ((X - Xt).abs() <= tol).all():\n", "issue": "ENH: New geoaccessor `geocentroid` to get the center point of points\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\n```python\r\n >>> import dtoolkit.geoaccessor\r\n >>> import geopandas as gpd\r\n >>> from shapely import Point\r\n >>> df = gpd.GeoDataFrame(\r\n ... geometry=[\r\n ... 
Point(100, 32),\r\n ... Point(120, 50),\r\n ... Point(122, 55)\r\n ... ],\r\n ... crs=4326,\r\n ... )\r\n >>> df\r\n geometry\r\n 0 POINT (100.00000 32.00000)\r\n 1 POINT (120.00000 50.00000)\r\n 2 POINT (122.00000 55.00000)\r\n >>> df.geocentroid()\r\n <POINT (112.213 44.119)>\r\n```\n", "before_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-4,\n) -> Point:\n \"\"\"\n Return the centroid of all points via the center of gravity method.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-4\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n Raises\n ------\n ValueError\n If the CRS is not ``ESGP:4326``.\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... )\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (112.375 44.276)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (114.516 46.675)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (114.516 46.675)>\n \"\"\"\n\n if s.crs != 4326:\n raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {s.crs!r}.\")\n\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(*X.tolist())\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}, {"content": "from __future__ import annotations\n\nfrom typing import Hashable\n\nimport geopandas as gpd\nimport pandas as pd\nfrom pandas.util._decorators import doc\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries import geocentroid as s_geocentroid\nfrom dtoolkit.geoaccessor.register import register_geodataframe_method\n\n\n@register_geodataframe_method\n@doc(s_geocentroid)\ndef geocentroid(\n df: gpd.GeoDataFrame,\n /,\n weights: Hashable | pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-4,\n) -> Point:\n if weights is not None and isinstance(weights, Hashable):\n weights = df[weights]\n\n return s_geocentroid(df.geometry, weights=weights, max_iter=max_iter, tol=tol)\n", "path": "dtoolkit/geoaccessor/geodataframe/geocentroid.py"}], "after_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom 
dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n r\"\"\"\n Return the centroid of all points via the center of gravity method.\n\n .. math::\n\n \\left\\{\\begin{matrix}\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n \\end{matrix}\\right.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-5\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... )\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (120 50)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (121.999 54.998)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (121.999 54.998)>\n \"\"\"\n\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(*X.tolist())\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}, {"content": "from __future__ import annotations\n\nfrom typing import Hashable\n\nimport geopandas as gpd\nimport pandas as pd\nfrom pandas.util._decorators import doc\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries import geocentroid as s_geocentroid\nfrom dtoolkit.geoaccessor.register import register_geodataframe_method\n\n\n@register_geodataframe_method\n@doc(s_geocentroid)\ndef geocentroid(\n df: gpd.GeoDataFrame,\n /,\n weights: Hashable | pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n if weights is not None and isinstance(weights, Hashable):\n weights = df[weights]\n\n return s_geocentroid(df.geometry, weights=weights, max_iter=max_iter, tol=tol)\n", "path": "dtoolkit/geoaccessor/geodataframe/geocentroid.py"}]}
| 1,933 | 1,014 |
gh_patches_debug_33369
|
rasdani/github-patches
|
git_diff
|
OCA__stock-logistics-warehouse-1192
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[12.0] stock_secondary_unit "secondary qty" value in picking not affecting "initial demand"
AFFECTED VERSIONS
12.0 (it works ok on 13.0)
STEPS TO REPRODUCE
Activate Units of Measure in general settings > inventory
Inventory > Master Data > Products > Set a secondary unit in a product
Create a new transfer > add product > select secondary UoM > input secondary qty
CURRENT BEHAVIOR
Value "Initial demand" is not affected by secondary qty input
In the same way, if "Initial demand" is set, "secondary qty" does not change - basically there is no relation between the two fields
REQUIRED BEHAVIOR
When "secondary qty" is updated, also "initial demand" should update - and viceversa, as it happens in SO with secondary unit modules
VIDEO
https://recordit.co/zcuDUx6xco
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `stock_secondary_unit/models/stock_move.py`
Content:
```
1 # Copyright 2018 Tecnativa - Sergio Teruel
2 # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
3 from odoo import api, fields, models
4 from odoo.addons import decimal_precision as dp
5 from odoo.tools.float_utils import float_round
6
7
8 class StockSecondaryUnitMixin(models.AbstractModel):
9 _name = 'stock.secondary.unit.mixin'
10 _description = 'Stock Secondary Unit Mixin'
11
12 secondary_uom_id = fields.Many2one(
13 comodel_name='product.secondary.unit',
14 string='Second unit',
15 )
16 secondary_uom_qty = fields.Float(
17 string='Secondary Qty',
18 digits=dp.get_precision('Product Unit of Measure'),
19 )
20
21
22 class StockMove(models.Model):
23 _inherit = ['stock.move', 'stock.secondary.unit.mixin']
24 _name = 'stock.move'
25
26 def _merge_moves_fields(self):
27 res = super(StockMove, self)._merge_moves_fields()
28 res['secondary_uom_qty'] = self[-1:].secondary_uom_qty
29 return res
30
31
32 class StockMoveLine(models.Model):
33 _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']
34 _name = 'stock.move.line'
35
36 @api.model
37 def create(self, vals):
38 move = self.env['stock.move'].browse(vals.get('move_id', False))
39 if move.secondary_uom_id:
40 uom = self.env['uom.uom'].browse(vals['product_uom_id'])
41 factor = move.secondary_uom_id.factor * uom.factor
42 move_line_qty = vals.get(
43 'product_uom_qty', vals.get('qty_done', 0.0))
44 qty = float_round(
45 move_line_qty / (factor or 1.0),
46 precision_rounding=move.secondary_uom_id.uom_id.rounding
47 )
48 vals.update({
49 'secondary_uom_qty': qty,
50 'secondary_uom_id': move.secondary_uom_id.id,
51 })
52 return super().create(vals)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py
--- a/stock_secondary_unit/models/stock_move.py
+++ b/stock_secondary_unit/models/stock_move.py
@@ -2,7 +2,7 @@
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.addons import decimal_precision as dp
-from odoo.tools.float_utils import float_round
+from odoo.tools.float_utils import float_compare, float_round
class StockSecondaryUnitMixin(models.AbstractModel):
@@ -28,6 +28,52 @@
res['secondary_uom_qty'] = self[-1:].secondary_uom_qty
return res
+ @api.onchange('secondary_uom_id', 'secondary_uom_qty')
+ def onchange_secondary_uom(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.secondary_uom_id.factor * self.product_uom.factor
+
+ qty = float_round(
+ self.secondary_uom_qty * factor,
+ precision_rounding=self.product_uom.rounding
+ )
+ if float_compare(
+ self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding
+ ) != 0:
+ self.product_uom_qty = qty
+
+ @api.onchange('product_uom_qty')
+ def onchange_secondary_unit_product_uom_qty(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.secondary_uom_id.factor * self.product_uom.factor
+
+ qty = float_round(
+ self.product_uom_qty / (factor or 1.0),
+ precision_rounding=self.secondary_uom_id.uom_id.rounding
+ )
+ if float_compare(
+ self.secondary_uom_qty,
+ qty,
+ precision_rounding=self.secondary_uom_id.uom_id.rounding
+ ) != 0:
+ self.secondary_uom_qty = qty
+
+ @api.onchange('product_uom')
+ def onchange_product_uom_for_secondary(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.product_uom.factor * self.secondary_uom_id.factor
+ qty = float_round(
+ self.product_uom_qty / (factor or 1.0),
+ precision_rounding=self.product_uom.rounding
+ )
+ if float_compare(
+ self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding
+ ) != 0:
+ self.secondary_uom_qty = qty
+
class StockMoveLine(models.Model):
_inherit = ['stock.move.line', 'stock.secondary.unit.mixin']
|
{"golden_diff": "diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py\n--- a/stock_secondary_unit/models/stock_move.py\n+++ b/stock_secondary_unit/models/stock_move.py\n@@ -2,7 +2,7 @@\n # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\n from odoo import api, fields, models\n from odoo.addons import decimal_precision as dp\n-from odoo.tools.float_utils import float_round\n+from odoo.tools.float_utils import float_compare, float_round\n \n \n class StockSecondaryUnitMixin(models.AbstractModel):\n@@ -28,6 +28,52 @@\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n \n+ @api.onchange('secondary_uom_id', 'secondary_uom_qty')\n+ def onchange_secondary_uom(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.secondary_uom_id.factor * self.product_uom.factor\n+\n+ qty = float_round(\n+ self.secondary_uom_qty * factor,\n+ precision_rounding=self.product_uom.rounding\n+ )\n+ if float_compare(\n+ self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding\n+ ) != 0:\n+ self.product_uom_qty = qty\n+\n+ @api.onchange('product_uom_qty')\n+ def onchange_secondary_unit_product_uom_qty(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.secondary_uom_id.factor * self.product_uom.factor\n+\n+ qty = float_round(\n+ self.product_uom_qty / (factor or 1.0),\n+ precision_rounding=self.secondary_uom_id.uom_id.rounding\n+ )\n+ if float_compare(\n+ self.secondary_uom_qty,\n+ qty,\n+ precision_rounding=self.secondary_uom_id.uom_id.rounding\n+ ) != 0:\n+ self.secondary_uom_qty = qty\n+\n+ @api.onchange('product_uom')\n+ def onchange_product_uom_for_secondary(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.product_uom.factor * self.secondary_uom_id.factor\n+ qty = float_round(\n+ self.product_uom_qty / (factor or 1.0),\n+ precision_rounding=self.product_uom.rounding\n+ )\n+ if float_compare(\n+ self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding\n+ ) != 0:\n+ self.secondary_uom_qty = qty\n+\n \n class StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n", "issue": "[12.0] stock_secondary_unit \"secondary qty\" value in picking not affecting \"initial demand\"\nAFFECTED VERSIONS\r\n\r\n12.0 (it works ok on 13.0)\r\n\r\nSTEPS TO REPRODUCE\r\n\r\nActivate Units of Measure in general settings > inventory\r\n\r\nInventory > Master Data > Products > Set a secondary unit in a product\r\n\r\n\r\n\r\nCreate a new transfer > add product > select secondary UoM > input secondary qty\r\n\r\n\r\n\r\nCURRENT BEHAVIOR\r\n\r\nValue \"Initial demand\" is not affected by secondary qty input\r\nIn the same way, if \"Initial demand\" is set, \"secondary qty\" does not change - basically there is no relation between the two fields\r\n\r\n\r\n\r\nREQUIRED BEHAVIOR \r\n\r\nWhen \"secondary qty\" is updated, also \"initial demand\" should update - and viceversa, as it happens in SO with secondary unit modules\r\n\r\nVIDEO\r\n\r\nhttps://recordit.co/zcuDUx6xco\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Tecnativa - Sergio Teruel\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nfrom odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools.float_utils import float_round\n\n\nclass StockSecondaryUnitMixin(models.AbstractModel):\n _name = 'stock.secondary.unit.mixin'\n _description = 'Stock Secondary Unit Mixin'\n\n secondary_uom_id = fields.Many2one(\n 
comodel_name='product.secondary.unit',\n string='Second unit',\n )\n secondary_uom_qty = fields.Float(\n string='Secondary Qty',\n digits=dp.get_precision('Product Unit of Measure'),\n )\n\n\nclass StockMove(models.Model):\n _inherit = ['stock.move', 'stock.secondary.unit.mixin']\n _name = 'stock.move'\n\n def _merge_moves_fields(self):\n res = super(StockMove, self)._merge_moves_fields()\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n\n\nclass StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n _name = 'stock.move.line'\n\n @api.model\n def create(self, vals):\n move = self.env['stock.move'].browse(vals.get('move_id', False))\n if move.secondary_uom_id:\n uom = self.env['uom.uom'].browse(vals['product_uom_id'])\n factor = move.secondary_uom_id.factor * uom.factor\n move_line_qty = vals.get(\n 'product_uom_qty', vals.get('qty_done', 0.0))\n qty = float_round(\n move_line_qty / (factor or 1.0),\n precision_rounding=move.secondary_uom_id.uom_id.rounding\n )\n vals.update({\n 'secondary_uom_qty': qty,\n 'secondary_uom_id': move.secondary_uom_id.id,\n })\n return super().create(vals)\n", "path": "stock_secondary_unit/models/stock_move.py"}], "after_files": [{"content": "# Copyright 2018 Tecnativa - Sergio Teruel\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nfrom odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools.float_utils import float_compare, float_round\n\n\nclass StockSecondaryUnitMixin(models.AbstractModel):\n _name = 'stock.secondary.unit.mixin'\n _description = 'Stock Secondary Unit Mixin'\n\n secondary_uom_id = fields.Many2one(\n comodel_name='product.secondary.unit',\n string='Second unit',\n )\n secondary_uom_qty = fields.Float(\n string='Secondary Qty',\n digits=dp.get_precision('Product Unit of Measure'),\n )\n\n\nclass StockMove(models.Model):\n _inherit = ['stock.move', 'stock.secondary.unit.mixin']\n _name = 'stock.move'\n\n def _merge_moves_fields(self):\n res = super(StockMove, self)._merge_moves_fields()\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n\n @api.onchange('secondary_uom_id', 'secondary_uom_qty')\n def onchange_secondary_uom(self):\n if not self.secondary_uom_id:\n return\n factor = self.secondary_uom_id.factor * self.product_uom.factor\n\n qty = float_round(\n self.secondary_uom_qty * factor,\n precision_rounding=self.product_uom.rounding\n )\n if float_compare(\n self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding\n ) != 0:\n self.product_uom_qty = qty\n\n @api.onchange('product_uom_qty')\n def onchange_secondary_unit_product_uom_qty(self):\n if not self.secondary_uom_id:\n return\n factor = self.secondary_uom_id.factor * self.product_uom.factor\n\n qty = float_round(\n self.product_uom_qty / (factor or 1.0),\n precision_rounding=self.secondary_uom_id.uom_id.rounding\n )\n if float_compare(\n self.secondary_uom_qty,\n qty,\n precision_rounding=self.secondary_uom_id.uom_id.rounding\n ) != 0:\n self.secondary_uom_qty = qty\n\n @api.onchange('product_uom')\n def onchange_product_uom_for_secondary(self):\n if not self.secondary_uom_id:\n return\n factor = self.product_uom.factor * self.secondary_uom_id.factor\n qty = float_round(\n self.product_uom_qty / (factor or 1.0),\n precision_rounding=self.product_uom.rounding\n )\n if float_compare(\n self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding\n ) != 0:\n self.secondary_uom_qty = qty\n\n\nclass 
StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n _name = 'stock.move.line'\n\n @api.model\n def create(self, vals):\n move = self.env['stock.move'].browse(vals.get('move_id', False))\n if move.secondary_uom_id:\n uom = self.env['uom.uom'].browse(vals['product_uom_id'])\n factor = move.secondary_uom_id.factor * uom.factor\n move_line_qty = vals.get(\n 'product_uom_qty', vals.get('qty_done', 0.0))\n qty = float_round(\n move_line_qty / (factor or 1.0),\n precision_rounding=move.secondary_uom_id.uom_id.rounding\n )\n vals.update({\n 'secondary_uom_qty': qty,\n 'secondary_uom_id': move.secondary_uom_id.id,\n })\n return super().create(vals)\n", "path": "stock_secondary_unit/models/stock_move.py"}]}
| 1,177 | 611 |
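Editor's note: the golden diff above keeps `product_uom_qty` and `secondary_uom_qty` in sync through paired onchange handlers. The sketch below illustrates only the conversion arithmetic outside of Odoo; the `factor` and `rounding` values are hypothetical stand-ins for the fields the real model reads from `product.secondary.unit` and `uom.uom`, and `round_to` is only a rough stand-in for Odoo's `float_round`.

```python
# Minimal sketch of the quantity conversion used by the onchange handlers.
# Plain functions, not an Odoo model; factor/rounding values are assumptions.

def round_to(value, rounding):
    """Round to a multiple of `rounding` (rough stand-in for float_round)."""
    return round(value / rounding) * rounding

def primary_from_secondary(secondary_qty, factor, rounding):
    # product_uom_qty = secondary_uom_qty * factor
    return round_to(secondary_qty * factor, rounding)

def secondary_from_primary(primary_qty, factor, rounding):
    # secondary_uom_qty = product_uom_qty / factor (guard against factor == 0)
    return round_to(primary_qty / (factor or 1.0), rounding)

if __name__ == "__main__":
    factor = 12.0  # e.g. one box = 12 units (assumption)
    print(primary_from_secondary(2, factor, 0.01))   # 24.0 units
    print(secondary_from_primary(24, factor, 0.01))  # 2.0 boxes
```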
gh_patches_debug_14880
|
rasdani/github-patches
|
git_diff
|
google__pytype-520
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytype can't parse hex literals
The following code causes the error `ParseError: syntax error, unexpected NAME, expecting ']'`:
```py
from typing import Literal
a: Literal[0x00ff]
```
Add lexing for base 2, 8, and 16 literals
Alters lexer.lex and adds a test for the new numeric types, Closes #494
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytype/tools/traces/source.py`
Content:
```
1 # Lint as: python2, python3
2 """Source and trace information."""
3
4 from __future__ import print_function
5 import collections
6
7 Location = collections.namedtuple("Location", ("line", "column"))
8
9
10 class AbstractTrace(
11 collections.namedtuple("AbstractTrace", ("op", "symbol", "types"))):
12
13 def __new__(cls, op, symbol, types):
14 if cls is AbstractTrace:
15 raise TypeError("cannot instantiate AbstractTrace")
16 return super(AbstractTrace, cls).__new__(cls, op, symbol, types)
17
18 def __repr__(self):
19 return "%s : %s <- %s" % self
20
21
22 class Code(object):
23 """Line-based source code access.
24
25 Attributes:
26 text: The source text.
27 traces: A dictionary from line number to traces.
28 filename: The filename - when using traces.trace(), this value is meaningful
29 only if an options object containing the filename was provided.
30 """
31
32 def __init__(self, src, raw_traces, trace_factory, filename):
33 """Initializer.
34
35 Args:
36 src: The source text.
37 raw_traces: Raw (opcode, symbol, types) values.
38 trace_factory: A subclass of source.AbstractTrace that will be used to
39 instantiate traces from raw values.
40 filename: The filename.
41 """
42 self.text = src
43 self.traces = _collect_traces(raw_traces, trace_factory)
44 self.filename = filename
45 self._lines = src.split("\n")
46 self._offsets = []
47 self._init_byte_offsets()
48
49 def _init_byte_offsets(self):
50 offset = 0
51 for line in self._lines:
52 self._offsets.append(offset)
53 offset += len(line) + 1 # account for the \n
54
55 def get_offset(self, location):
56 """Gets the utf-8 byte offset of a source.Location from start of source."""
57 return self._offsets[location.line - 1] + location.column
58
59 def line(self, n):
60 """Gets the text at a line number."""
61 return self._lines[n - 1]
62
63 def get_closest_line_range(self, start, end):
64 """Gets all valid line numbers in the [start, end) line range."""
65 return range(start, min(end, len(self._lines) + 1))
66
67 def find_first_text(self, start, end, text):
68 """Gets first location, if any, the string appears at in the line range."""
69
70 for l in self.get_closest_line_range(start, end):
71 col = self.line(l).find(text)
72 if col > -1:
73 # TODO(mdemello): Temporary hack, replace with a token stream!
74 # This will break if we have a # in a string before our desired text.
75 comment_marker = self.line(l).find("#")
76 if -1 < comment_marker < col:
77 continue
78 return Location(l, col)
79 return None
80
81 def next_non_comment_line(self, line):
82 """Gets the next non-comment line, if any, after the given line."""
83 for l in range(line + 1, len(self._lines) + 1):
84 if self.line(l).lstrip().startswith("#"):
85 continue
86 return l
87 return None
88
89 def display_traces(self):
90 """Prints the source file with traces for debugging."""
91 for line in sorted(self.traces):
92 print("%d %s" % (line, self.line(line)))
93 for trace in self.traces[line]:
94 print(" %s" % (trace,))
95 print("-------------------")
96
97 def get_attr_location(self, name, location):
98 """Returns the location and span of the attribute in an attribute access.
99
100 Args:
101 name: The attribute name.
102 location: The location of the value the attribute is accessed on.
103 """
104 # TODO(mdemello): This is pretty crude, and does not for example take into
105 # account multiple calls of the same attribute in a line. It is just to get
106 # our tests passing until we incorporate asttokens.
107 line, _ = location
108 src_line = self.line(line)
109 attr = name.split(".")[-1]
110 dot_attr = "." + attr
111 if dot_attr in src_line:
112 col = src_line.index(dot_attr)
113 return (Location(line, col + 1), len(attr))
114 else:
115 # We have something like
116 # (foo
117 # .bar)
118 # or
119 # (foo.
120 # bar)
121 # Lookahead up to 5 lines to find '.attr' (the ast node always starts from
122 # the beginning of the chain, so foo.\nbar.\nbaz etc could span several
123 # lines).
124 attr_loc = self._get_multiline_location(location, 5, dot_attr)
125 if attr_loc:
126 return (Location(attr_loc.line, attr_loc.column + 1), len(attr))
127 else:
128 # Find consecutive lines ending with '.' and starting with 'attr'.
129 for l in self.get_closest_line_range(line, line + 5):
130 if self.line(l).endswith("."):
131 next_line = self.next_non_comment_line(l)
132 text = self.line(next_line)
133 if text.lstrip().startswith(attr):
134 c = text.index(attr)
135 return (Location(next_line, c), len(attr))
136 # if all else fails, fall back to just spanning the name
137 return (location, len(name))
138
139 def _get_multiline_location(self, location, n_lines, text):
140 """Gets the start location of text anywhere within n_lines of location."""
141 line, _ = location
142 text_loc = self.find_first_text(line, line + n_lines, text)
143 if text_loc:
144 return text_loc
145 else:
146 return None
147
148
149 def _collect_traces(raw_traces, trace_factory):
150 """Postprocesses pytype's opcode traces."""
151 out = collections.defaultdict(list)
152 for op, symbol, data in raw_traces:
153 out[op.line].append(trace_factory(op.name, symbol, data))
154 return out
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytype/tools/traces/source.py b/pytype/tools/traces/source.py
--- a/pytype/tools/traces/source.py
+++ b/pytype/tools/traces/source.py
@@ -4,6 +4,9 @@
from __future__ import print_function
import collections
+from pytype import compat
+
+
Location = collections.namedtuple("Location", ("line", "column"))
@@ -50,7 +53,9 @@
offset = 0
for line in self._lines:
self._offsets.append(offset)
- offset += len(line) + 1 # account for the \n
+ # convert line to bytes
+ bytes_ = compat.bytestring(line)
+ offset += len(bytes_) + 1 # account for the \n
def get_offset(self, location):
"""Gets the utf-8 byte offset of a source.Location from start of source."""
|
{"golden_diff": "diff --git a/pytype/tools/traces/source.py b/pytype/tools/traces/source.py\n--- a/pytype/tools/traces/source.py\n+++ b/pytype/tools/traces/source.py\n@@ -4,6 +4,9 @@\n from __future__ import print_function\n import collections\n \n+from pytype import compat\n+\n+\n Location = collections.namedtuple(\"Location\", (\"line\", \"column\"))\n \n \n@@ -50,7 +53,9 @@\n offset = 0\n for line in self._lines:\n self._offsets.append(offset)\n- offset += len(line) + 1 # account for the \\n\n+ # convert line to bytes\n+ bytes_ = compat.bytestring(line)\n+ offset += len(bytes_) + 1 # account for the \\n\n \n def get_offset(self, location):\n \"\"\"Gets the utf-8 byte offset of a source.Location from start of source.\"\"\"\n", "issue": "pytype can't parse hex literals\nThe following code causes the error `ParseError: syntax error, unexpected NAME, expecting ']'`:\r\n\r\n```py\r\nfrom typing import Literal\r\n\r\na: Literal[0x00ff]\r\n```\nAdd lexing for base 2, 8, and 16 literals\nAlters lexer.lex and adds a test for the new numeric types, Closes #494 \n", "before_files": [{"content": "# Lint as: python2, python3\n\"\"\"Source and trace information.\"\"\"\n\nfrom __future__ import print_function\nimport collections\n\nLocation = collections.namedtuple(\"Location\", (\"line\", \"column\"))\n\n\nclass AbstractTrace(\n collections.namedtuple(\"AbstractTrace\", (\"op\", \"symbol\", \"types\"))):\n\n def __new__(cls, op, symbol, types):\n if cls is AbstractTrace:\n raise TypeError(\"cannot instantiate AbstractTrace\")\n return super(AbstractTrace, cls).__new__(cls, op, symbol, types)\n\n def __repr__(self):\n return \"%s : %s <- %s\" % self\n\n\nclass Code(object):\n \"\"\"Line-based source code access.\n\n Attributes:\n text: The source text.\n traces: A dictionary from line number to traces.\n filename: The filename - when using traces.trace(), this value is meaningful\n only if an options object containing the filename was provided.\n \"\"\"\n\n def __init__(self, src, raw_traces, trace_factory, filename):\n \"\"\"Initializer.\n\n Args:\n src: The source text.\n raw_traces: Raw (opcode, symbol, types) values.\n trace_factory: A subclass of source.AbstractTrace that will be used to\n instantiate traces from raw values.\n filename: The filename.\n \"\"\"\n self.text = src\n self.traces = _collect_traces(raw_traces, trace_factory)\n self.filename = filename\n self._lines = src.split(\"\\n\")\n self._offsets = []\n self._init_byte_offsets()\n\n def _init_byte_offsets(self):\n offset = 0\n for line in self._lines:\n self._offsets.append(offset)\n offset += len(line) + 1 # account for the \\n\n\n def get_offset(self, location):\n \"\"\"Gets the utf-8 byte offset of a source.Location from start of source.\"\"\"\n return self._offsets[location.line - 1] + location.column\n\n def line(self, n):\n \"\"\"Gets the text at a line number.\"\"\"\n return self._lines[n - 1]\n\n def get_closest_line_range(self, start, end):\n \"\"\"Gets all valid line numbers in the [start, end) line range.\"\"\"\n return range(start, min(end, len(self._lines) + 1))\n\n def find_first_text(self, start, end, text):\n \"\"\"Gets first location, if any, the string appears at in the line range.\"\"\"\n\n for l in self.get_closest_line_range(start, end):\n col = self.line(l).find(text)\n if col > -1:\n # TODO(mdemello): Temporary hack, replace with a token stream!\n # This will break if we have a # in a string before our desired text.\n comment_marker = self.line(l).find(\"#\")\n if -1 < comment_marker < col:\n continue\n 
return Location(l, col)\n return None\n\n def next_non_comment_line(self, line):\n \"\"\"Gets the next non-comment line, if any, after the given line.\"\"\"\n for l in range(line + 1, len(self._lines) + 1):\n if self.line(l).lstrip().startswith(\"#\"):\n continue\n return l\n return None\n\n def display_traces(self):\n \"\"\"Prints the source file with traces for debugging.\"\"\"\n for line in sorted(self.traces):\n print(\"%d %s\" % (line, self.line(line)))\n for trace in self.traces[line]:\n print(\" %s\" % (trace,))\n print(\"-------------------\")\n\n def get_attr_location(self, name, location):\n \"\"\"Returns the location and span of the attribute in an attribute access.\n\n Args:\n name: The attribute name.\n location: The location of the value the attribute is accessed on.\n \"\"\"\n # TODO(mdemello): This is pretty crude, and does not for example take into\n # account multiple calls of the same attribute in a line. It is just to get\n # our tests passing until we incorporate asttokens.\n line, _ = location\n src_line = self.line(line)\n attr = name.split(\".\")[-1]\n dot_attr = \".\" + attr\n if dot_attr in src_line:\n col = src_line.index(dot_attr)\n return (Location(line, col + 1), len(attr))\n else:\n # We have something like\n # (foo\n # .bar)\n # or\n # (foo.\n # bar)\n # Lookahead up to 5 lines to find '.attr' (the ast node always starts from\n # the beginning of the chain, so foo.\\nbar.\\nbaz etc could span several\n # lines).\n attr_loc = self._get_multiline_location(location, 5, dot_attr)\n if attr_loc:\n return (Location(attr_loc.line, attr_loc.column + 1), len(attr))\n else:\n # Find consecutive lines ending with '.' and starting with 'attr'.\n for l in self.get_closest_line_range(line, line + 5):\n if self.line(l).endswith(\".\"):\n next_line = self.next_non_comment_line(l)\n text = self.line(next_line)\n if text.lstrip().startswith(attr):\n c = text.index(attr)\n return (Location(next_line, c), len(attr))\n # if all else fails, fall back to just spanning the name\n return (location, len(name))\n\n def _get_multiline_location(self, location, n_lines, text):\n \"\"\"Gets the start location of text anywhere within n_lines of location.\"\"\"\n line, _ = location\n text_loc = self.find_first_text(line, line + n_lines, text)\n if text_loc:\n return text_loc\n else:\n return None\n\n\ndef _collect_traces(raw_traces, trace_factory):\n \"\"\"Postprocesses pytype's opcode traces.\"\"\"\n out = collections.defaultdict(list)\n for op, symbol, data in raw_traces:\n out[op.line].append(trace_factory(op.name, symbol, data))\n return out\n", "path": "pytype/tools/traces/source.py"}], "after_files": [{"content": "# Lint as: python2, python3\n\"\"\"Source and trace information.\"\"\"\n\nfrom __future__ import print_function\nimport collections\n\nfrom pytype import compat\n\n\nLocation = collections.namedtuple(\"Location\", (\"line\", \"column\"))\n\n\nclass AbstractTrace(\n collections.namedtuple(\"AbstractTrace\", (\"op\", \"symbol\", \"types\"))):\n\n def __new__(cls, op, symbol, types):\n if cls is AbstractTrace:\n raise TypeError(\"cannot instantiate AbstractTrace\")\n return super(AbstractTrace, cls).__new__(cls, op, symbol, types)\n\n def __repr__(self):\n return \"%s : %s <- %s\" % self\n\n\nclass Code(object):\n \"\"\"Line-based source code access.\n\n Attributes:\n text: The source text.\n traces: A dictionary from line number to traces.\n filename: The filename - when using traces.trace(), this value is meaningful\n only if an options object containing the filename was 
provided.\n \"\"\"\n\n def __init__(self, src, raw_traces, trace_factory, filename):\n \"\"\"Initializer.\n\n Args:\n src: The source text.\n raw_traces: Raw (opcode, symbol, types) values.\n trace_factory: A subclass of source.AbstractTrace that will be used to\n instantiate traces from raw values.\n filename: The filename.\n \"\"\"\n self.text = src\n self.traces = _collect_traces(raw_traces, trace_factory)\n self.filename = filename\n self._lines = src.split(\"\\n\")\n self._offsets = []\n self._init_byte_offsets()\n\n def _init_byte_offsets(self):\n offset = 0\n for line in self._lines:\n self._offsets.append(offset)\n # convert line to bytes\n bytes_ = compat.bytestring(line)\n offset += len(bytes_) + 1 # account for the \\n\n\n def get_offset(self, location):\n \"\"\"Gets the utf-8 byte offset of a source.Location from start of source.\"\"\"\n return self._offsets[location.line - 1] + location.column\n\n def line(self, n):\n \"\"\"Gets the text at a line number.\"\"\"\n return self._lines[n - 1]\n\n def get_closest_line_range(self, start, end):\n \"\"\"Gets all valid line numbers in the [start, end) line range.\"\"\"\n return range(start, min(end, len(self._lines) + 1))\n\n def find_first_text(self, start, end, text):\n \"\"\"Gets first location, if any, the string appears at in the line range.\"\"\"\n\n for l in self.get_closest_line_range(start, end):\n col = self.line(l).find(text)\n if col > -1:\n # TODO(mdemello): Temporary hack, replace with a token stream!\n # This will break if we have a # in a string before our desired text.\n comment_marker = self.line(l).find(\"#\")\n if -1 < comment_marker < col:\n continue\n return Location(l, col)\n return None\n\n def next_non_comment_line(self, line):\n \"\"\"Gets the next non-comment line, if any, after the given line.\"\"\"\n for l in range(line + 1, len(self._lines) + 1):\n if self.line(l).lstrip().startswith(\"#\"):\n continue\n return l\n return None\n\n def display_traces(self):\n \"\"\"Prints the source file with traces for debugging.\"\"\"\n for line in sorted(self.traces):\n print(\"%d %s\" % (line, self.line(line)))\n for trace in self.traces[line]:\n print(\" %s\" % (trace,))\n print(\"-------------------\")\n\n def get_attr_location(self, name, location):\n \"\"\"Returns the location and span of the attribute in an attribute access.\n\n Args:\n name: The attribute name.\n location: The location of the value the attribute is accessed on.\n \"\"\"\n # TODO(mdemello): This is pretty crude, and does not for example take into\n # account multiple calls of the same attribute in a line. It is just to get\n # our tests passing until we incorporate asttokens.\n line, _ = location\n src_line = self.line(line)\n attr = name.split(\".\")[-1]\n dot_attr = \".\" + attr\n if dot_attr in src_line:\n col = src_line.index(dot_attr)\n return (Location(line, col + 1), len(attr))\n else:\n # We have something like\n # (foo\n # .bar)\n # or\n # (foo.\n # bar)\n # Lookahead up to 5 lines to find '.attr' (the ast node always starts from\n # the beginning of the chain, so foo.\\nbar.\\nbaz etc could span several\n # lines).\n attr_loc = self._get_multiline_location(location, 5, dot_attr)\n if attr_loc:\n return (Location(attr_loc.line, attr_loc.column + 1), len(attr))\n else:\n # Find consecutive lines ending with '.' 
and starting with 'attr'.\n for l in self.get_closest_line_range(line, line + 5):\n if self.line(l).endswith(\".\"):\n next_line = self.next_non_comment_line(l)\n text = self.line(next_line)\n if text.lstrip().startswith(attr):\n c = text.index(attr)\n return (Location(next_line, c), len(attr))\n # if all else fails, fall back to just spanning the name\n return (location, len(name))\n\n def _get_multiline_location(self, location, n_lines, text):\n \"\"\"Gets the start location of text anywhere within n_lines of location.\"\"\"\n line, _ = location\n text_loc = self.find_first_text(line, line + n_lines, text)\n if text_loc:\n return text_loc\n else:\n return None\n\n\ndef _collect_traces(raw_traces, trace_factory):\n \"\"\"Postprocesses pytype's opcode traces.\"\"\"\n out = collections.defaultdict(list)\n for op, symbol, data in raw_traces:\n out[op.line].append(trace_factory(op.name, symbol, data))\n return out\n", "path": "pytype/tools/traces/source.py"}]}
| 2,024 | 200 |
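Editor's note: the golden diff above changes `_init_byte_offsets` to measure each line in UTF-8 bytes rather than characters, via `compat.bytestring`. The standalone snippet below only illustrates why the two measurements diverge on non-ASCII lines; it uses plain `str.encode` instead of pytype's `compat` helper.

```python
# Why byte offsets and character offsets diverge on non-ASCII source lines.
lines = ["x = 'é'", "y = 1"]

char_offsets, byte_offsets = [], []
char_pos = byte_pos = 0
for line in lines:
    char_offsets.append(char_pos)
    byte_offsets.append(byte_pos)
    char_pos += len(line) + 1                  # characters + "\n"
    byte_pos += len(line.encode("utf-8")) + 1  # UTF-8 bytes + "\n"

print(char_offsets)  # [0, 8]
print(byte_offsets)  # [0, 9]  -- 'é' is two bytes in UTF-8
```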
gh_patches_debug_11603
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1648
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrate tests from unittest to pytest
Our existing test suite has been written with the Python [`unittest`](https://docs.python.org/3/library/unittest.html) framework. However, as the test suite has grown and opsdroid has become more complex we are running into issues with the tests. Mainly around setting up and tearing down tests.
The @opsdroid/maintainers team have decided that we want to migrate all tests to be written with the [`pytest`](https://docs.pytest.org/en/latest/contents.html) framework instead so that we can make better use of fixtures. Fixtures are more reusable and portable and should help reduce complexity all over.
There's a lot to be done but it can be done piece by piece as `pytest` can run tests in either format. So if you wish to help in the effort you can start by searching the codebase for unittest suites. These are classes which are subclassed from `unittest.TestCase` or `asynctest.TestCase`, so searching all files for `unittest.TestCase` and `asynctest.TestCase` should be a good place to start.
For detailed information on running the test suite and contributing to opsdroid [see the docs](https://docs.opsdroid.dev/en/latest/contributing/index.html). But the quickest way to get started is with [`tox`](https://tox.readthedocs.io/en/latest/).
```bash
pip install -U tox # You only need to install tox once
tox -e py36,lint # Run the Python 3.6 tests (the lowest version we support) and the linter
```
Once you have found a test suite you wish to convert there are a few steps you need to follow to convert from unittest to pytest:
- Move tests from top level `tests` directory to a nested `tests` directory in opsdroid. Create one in an appropriate place if there isn't already one.
- Remove tests from classes; pytest tests are just regular functions.
- Change assertions to use regular `assert` or [pytest assertions](https://docs.pytest.org/en/latest/assert.html).
- Mark async tests. In unittest we write async tests by using the `asynctest.TestCase`, but in pytest we decorate our tests with `@pytest.mark.asyncio` instead.
- Move setup operations to fixtures. If a test class contains a `setUp` method anything created here should become a fixture. Check the existing fixtures in `conftest.py` before creating new ones.
- Add docstrings to tests and fixtures to explain what they do. We have been pretty rubbish with this up until now and there are many tests which are not obvious in what they are testing.
Here's an example:
```python
# Before (unittest)
import asynctest
import asynctest.mock as mock

from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message
from opsdroid.matchers import match_regex
from opsdroid import constraints


class TestConstraints(asynctest.TestCase):
    """Test the opsdroid constraint decorators."""

    async def setUp(self):
        configure_lang({})

    async def getMockSkill(self):
        async def mockedskill(opsdroid, config, message):
            pass

        mockedskill.config = {}
        return mockedskill

    async def test_constrain_rooms_constrains(self):
        with OpsDroid() as opsdroid:
            skill = await self.getMockSkill()
            skill = match_regex(r".*")(skill)
            skill = constraints.constrain_rooms(["#general"])(skill)
            opsdroid.skills.append(skill)

            tasks = await opsdroid.parse(
                Message(text="Hello", user="user", target="#random", connector=None)
            )
            self.assertEqual(len(tasks), 2)  # Just match_always and match_event
```
```python
# After (pytest)
import pytest

from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message
from opsdroid.matchers import match_regex
from opsdroid import constraints


configure_lang({})  # Required for our internationalization of error messages


@pytest.fixture
def opsdroid():
    """An instance of the OpsDroid class."""
    with OpsDroid() as opsdroid:
        yield opsdroid


@pytest.fixture
def mock_skill():
    """A skill which does nothing but follows the skill API."""

    async def mockedskill(opsdroid, config, message):
        pass

    mockedskill.config = {}
    return mockedskill


@pytest.mark.asyncio
async def test_constrain_rooms_constrains(opsdroid, mock_skill):
    """Test that with the room constraint a skill is not called."""
    skill = match_regex(r".*")(mock_skill)
    skill = constraints.constrain_rooms(["#general"])(skill)
    opsdroid.skills.append(skill)

    tasks = await opsdroid.parse(
        Message(text="Hello", user="user", target="#random", connector=None)
    )
    assert len(tasks) == 2  # Just match_always and match_event
```
Fixtures such as the `opsdroid` fixture will be extremely useful in many tests, so we will be creating some standard ones like this which will be available in all tests. If you write a fixture that you think could be useful in other places please don't hesitate to contribute it to the `conftest.py` file.
If you have any questions you can chat to us on [Matrix](https://riot.im/app/#/room/#opsdroid-general:matrix.org). We encourage you to get started and if you have issues or get stuck then open a [draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) with your changes and we can take a look.
---
When this issue is complete the documentation about the migration should be removed from the testing docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/conftest.py`
Content:
```
1 """Pytest config for all opsdroid tests."""
2 from opsdroid.testing import opsdroid
3
4 from opsdroid.cli.start import configure_lang
5
6 __all__ = ["opsdroid"]
7
8 configure_lang({})
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/conftest.py b/opsdroid/conftest.py
--- a/opsdroid/conftest.py
+++ b/opsdroid/conftest.py
@@ -1,8 +1,29 @@
"""Pytest config for all opsdroid tests."""
+import pytest
+
+import asyncio
+
from opsdroid.testing import opsdroid
+from opsdroid.connector import Connector
from opsdroid.cli.start import configure_lang
__all__ = ["opsdroid"]
configure_lang({})
+
+
[email protected](scope="session")
+def get_connector():
+ def _get_connector(config={}):
+ return Connector(config, opsdroid=opsdroid)
+
+ return _get_connector
+
+
[email protected]_fixture
+def event_loop():
+ """Create an instance of the default event loop for each test case."""
+ loop = asyncio.get_event_loop_policy().new_event_loop()
+ yield loop
+ loop.close()
|
{"golden_diff": "diff --git a/opsdroid/conftest.py b/opsdroid/conftest.py\n--- a/opsdroid/conftest.py\n+++ b/opsdroid/conftest.py\n@@ -1,8 +1,29 @@\n \"\"\"Pytest config for all opsdroid tests.\"\"\"\n+import pytest\n+\n+import asyncio\n+\n from opsdroid.testing import opsdroid\n+from opsdroid.connector import Connector\n \n from opsdroid.cli.start import configure_lang\n \n __all__ = [\"opsdroid\"]\n \n configure_lang({})\n+\n+\[email protected](scope=\"session\")\n+def get_connector():\n+ def _get_connector(config={}):\n+ return Connector(config, opsdroid=opsdroid)\n+\n+ return _get_connector\n+\n+\[email protected]_fixture\n+def event_loop():\n+ \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n+ loop = asyncio.get_event_loop_policy().new_event_loop()\n+ yield loop\n+ loop.close()\n", "issue": "Migrate tests from unittest to pytest\nOur existing test suite has been written with the Python [`unittest`](https://docs.python.org/3/library/unittest.html) framework. However, as the test suite has grown and opsdroid has become more complex we are running into issues with the tests. Mainly around setting up and tearing down tests.\r\n\r\nThe @opsdroid/maintainers team have decided that we want to migrate all tests to be written with the [`pytest`](https://docs.pytest.org/en/latest/contents.html) framework instead so that we can make better use of fixtures. Fixtures are more reusable and portable and should help reduce complexity all over.\r\n\r\nThere's a lot to be done but it can be done piece by piece as `pytest` can run tests in either format. So if you wish to help in the effort you can start by searching the codebase for unittest suites. These are classes which are subclassed from `unittest.TestCase` or `asynctest.TestCase`, so searching all files for `unittest.TestCase` and `asynctest.TestCase` should be a good place to start.\r\n\r\nFor detailed information on running the test suite and contributing to opsdroid [see the docs](https://docs.opsdroid.dev/en/latest/contributing/index.html). But the quickest way to get started us with [`tox`](https://tox.readthedocs.io/en/latest/).\r\n\r\n```bash\r\npip install -U tox # You only need to install tox once\r\n\r\ntox -e py36,lint # Run the Python 3.6 tests (the lowest version we support) and the linter\r\n```\r\n\r\nOnce you have found a test suite you wish to convert there are a few steps you need to follow to convert from unittest to pytest:\r\n- Move tests from top level `tests` directory to a nested `tests` directory in opsdroid. Create one in an appropriate place if there isn't already one.\r\n- Remove test from class, pytest tests are just regular functions.\r\n- Change assertions to use regular `assert` or [pytest assertions](https://docs.pytest.org/en/latest/assert.html).\r\n- Mark async tests. In unittest we write async tests by using the `asynctest.TestCase`, but in pytest we decorate our tests with `@pytest.mark.asyncio` instead.\r\n- Move setup operations to fixtures. If a test class contains a `setUp` method anything created here should become a fixture. Check the existing fixtures in `conftest.py` before creating new ones.\r\n- Add docstrings to tests and fixtures to explain what they do. 
We have been pretty rubbish with this up until now and there are many tests which are not obvious in what they are testing.\r\n\r\nHere's an example:\r\n\r\n```python\r\n# Before (unittest)\r\nimport asynctest\r\nimport asynctest.mock as mock\r\n\r\nfrom opsdroid.cli.start import configure_lang\r\nfrom opsdroid.core import OpsDroid\r\nfrom opsdroid.events import Message\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid import constraints\r\n\r\n\r\nclass TestConstraints(asynctest.TestCase):\r\n \"\"\"Test the opsdroid constraint decorators.\"\"\"\r\n\r\n async def setUp(self):\r\n configure_lang({})\r\n\r\n async def getMockSkill(self):\r\n async def mockedskill(opsdroid, config, message):\r\n pass\r\n\r\n mockedskill.config = {}\r\n return mockedskill\r\n\r\n async def test_constrain_rooms_constrains(self):\r\n with OpsDroid() as opsdroid:\r\n skill = await self.getMockSkill()\r\n skill = match_regex(r\".*\")(skill)\r\n skill = constraints.constrain_rooms([\"#general\"])(skill)\r\n opsdroid.skills.append(skill)\r\n\r\n tasks = await opsdroid.parse(\r\n Message(text=\"Hello\", user=\"user\", target=\"#random\", connector=None)\r\n )\r\n self.assertEqual(len(tasks), 2) # Just match_always and match_event\r\n```\r\n\r\n```python\r\n# After (pytest)\r\nimport pytest\r\n\r\nfrom opsdroid.cli.start import configure_lang\r\nfrom opsdroid.core import OpsDroid\r\nfrom opsdroid.events import Message\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid import constraints\r\n\r\n\r\nconfigure_lang({}) # Required for our internationalization of error messages\r\n\r\n\r\[email protected]\r\ndef opsdroid():\r\n \"\"\"An instance of the OpsDroid class.\"\"\"\r\n with OpsDroid() as opsdroid:\r\n yield opsdroid\r\n\r\n\r\[email protected]\r\ndef mock_skill():\r\n \"\"\"A skill which does nothing but follows the skill API.\"\"\"\r\n\r\n async def mockedskill(opsdroid, config, message):\r\n pass\r\n\r\n mockedskill.config = {}\r\n return mockedskill\r\n\r\n\r\[email protected]\r\nasync def test_constrain_rooms_constrains(opsdroid, mock_skill):\r\n \"\"\"Test that with the room constraint a skill is not called.\"\"\"\r\n skill = match_regex(r\".*\")(mock_skill)\r\n skill = constraints.constrain_rooms([\"#general\"])(skill)\r\n opsdroid.skills.append(skill)\r\n\r\n tasks = await opsdroid.parse(\r\n Message(text=\"Hello\", user=\"user\", target=\"#random\", connector=None)\r\n )\r\n assert len(tasks) == 2 # Just match_always and match_event\r\n```\r\n\r\nFixtures such as the `opsdroid` fixture will be extremely useful in many tests, so we will be creating some standard ones like this which will be available in all tests. If you write a fixture that you think could be useful in other places please don't hesitate to contribute it to the `conftest.py` file.\r\n\r\nIf you have any questions you can chat to us on [Matrix](https://riot.im/app/#/room/#opsdroid-general:matrix.org). 
We encourage you to get started and if you have issues or get stuck then open a [draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) with your changes and we can take a look.\r\n\r\n---\r\n\r\nWhen this issue is complete the documentation about the migration should be removed from the testing docs.\n", "before_files": [{"content": "\"\"\"Pytest config for all opsdroid tests.\"\"\"\nfrom opsdroid.testing import opsdroid\n\nfrom opsdroid.cli.start import configure_lang\n\n__all__ = [\"opsdroid\"]\n\nconfigure_lang({})\n", "path": "opsdroid/conftest.py"}], "after_files": [{"content": "\"\"\"Pytest config for all opsdroid tests.\"\"\"\nimport pytest\n\nimport asyncio\n\nfrom opsdroid.testing import opsdroid\nfrom opsdroid.connector import Connector\n\nfrom opsdroid.cli.start import configure_lang\n\n__all__ = [\"opsdroid\"]\n\nconfigure_lang({})\n\n\[email protected](scope=\"session\")\ndef get_connector():\n def _get_connector(config={}):\n return Connector(config, opsdroid=opsdroid)\n\n return _get_connector\n\n\[email protected]_fixture\ndef event_loop():\n \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()\n", "path": "opsdroid/conftest.py"}]}
| 1,570 | 218 |
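Editor's note: the conftest change above adds a session-scoped connector factory and a per-test `event_loop` fixture. The sketch below shows the same two fixture patterns in a self-contained file; `FakeConnector` is an illustrative stand-in for opsdroid's real `Connector` class, and `@pytest.fixture` is used in place of the deprecated `@pytest.yield_fixture` from the diff.

```python
import asyncio
import pytest


class FakeConnector:
    """Stand-in for opsdroid.connector.Connector (assumption for illustration)."""

    def __init__(self, config, opsdroid=None):
        self.config = config
        self.opsdroid = opsdroid


@pytest.fixture(scope="session")
def get_connector():
    # Factory fixture: tests call it with a config dict to build a connector.
    def _get_connector(config={}):
        return FakeConnector(config)

    return _get_connector


@pytest.fixture
def event_loop():
    """Create a fresh event loop for each test case and close it afterwards."""
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    loop.close()


def test_connector_config(get_connector):
    connector = get_connector({"name": "fake"})
    assert connector.config["name"] == "fake"
```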
gh_patches_debug_38667
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-22
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Signal handlers
In the worker class
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/worker.py`
Content:
```
1 import asyncio
2 import functools
3
4 import uvloop
5
6 from gunicorn.workers.base import Worker
7 from uvicorn.protocols import http
8
9
10 class UvicornWorker(Worker):
11 """
12 A worker class for Gunicorn that interfaces with an ASGI consumer callable,
13 rather than a WSGI callable.
14
15 We use a couple of packages from MagicStack in order to achieve an
16 extremely high-throughput and low-latency implementation:
17
18 * `uvloop` as the event loop policy.
19 * `httptools` as the HTTP request parser.
20 """
21
22 def init_process(self):
23 # Close any existing event loop before setting a
24 # new policy.
25 asyncio.get_event_loop().close()
26
27 # Setup uvloop policy, so that every
28 # asyncio.get_event_loop() will create an instance
29 # of uvloop event loop.
30 asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
31
32 super().init_process()
33
34 def run(self):
35 loop = asyncio.get_event_loop()
36 loop.create_task(self.create_servers(loop))
37 loop.create_task(tick(loop, self.notify))
38 loop.run_forever()
39
40 async def create_servers(self, loop):
41 cfg = self.cfg
42 consumer = self.wsgi
43
44 for sock in self.sockets:
45 protocol = functools.partial(
46 http.HttpProtocol,
47 consumer=consumer, loop=loop, sock=sock, cfg=cfg
48 )
49 await loop.create_server(protocol, sock=sock)
50
51
52 async def tick(loop, notify):
53 cycle = 0
54 while True:
55 http.set_time_and_date()
56 cycle = (cycle + 1) % 10
57 if cycle == 0:
58 notify()
59 await asyncio.sleep(1)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/worker.py b/uvicorn/worker.py
--- a/uvicorn/worker.py
+++ b/uvicorn/worker.py
@@ -1,5 +1,7 @@
import asyncio
import functools
+import signal
+import sys
import uvloop
@@ -19,6 +21,11 @@
* `httptools` as the HTTP request parser.
"""
+ def __init__(self, *args, **kwargs): # pragma: no cover
+ super().__init__(*args, **kwargs)
+ self.servers = []
+ self.exit_code = 0
+
def init_process(self):
# Close any existing event loop before setting a
# new policy.
@@ -34,8 +41,45 @@
def run(self):
loop = asyncio.get_event_loop()
loop.create_task(self.create_servers(loop))
- loop.create_task(tick(loop, self.notify))
+ loop.create_task(self.tick(loop))
loop.run_forever()
+ sys.exit(self.exit_code)
+
+ def init_signals(self):
+ # Set up signals through the event loop API.
+ loop = asyncio.get_event_loop()
+
+ loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
+ signal.SIGQUIT, None)
+
+ loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
+ signal.SIGTERM, None)
+
+ loop.add_signal_handler(signal.SIGINT, self.handle_quit,
+ signal.SIGINT, None)
+
+ loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
+ signal.SIGWINCH, None)
+
+ loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
+ signal.SIGUSR1, None)
+
+ loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
+ signal.SIGABRT, None)
+
+ # Don't let SIGTERM and SIGUSR1 disturb active requests
+ # by interrupting system calls
+ signal.siginterrupt(signal.SIGTERM, False)
+ signal.siginterrupt(signal.SIGUSR1, False)
+
+ def handle_quit(self, sig, frame):
+ self.alive = False
+ self.cfg.worker_int(self)
+
+ def handle_abort(self, sig, frame):
+ self.alive = False
+ self.exit_code = 1
+ self.cfg.worker_abort(self)
async def create_servers(self, loop):
cfg = self.cfg
@@ -46,14 +90,19 @@
http.HttpProtocol,
consumer=consumer, loop=loop, sock=sock, cfg=cfg
)
- await loop.create_server(protocol, sock=sock)
+ server = await loop.create_server(protocol, sock=sock)
+ self.servers.append(server)
+ async def tick(self, loop):
+ cycle = 0
+ while self.alive:
+ http.set_time_and_date()
+ cycle = (cycle + 1) % 10
+ if cycle == 0:
+ self.notify()
+ await asyncio.sleep(1)
-async def tick(loop, notify):
- cycle = 0
- while True:
- http.set_time_and_date()
- cycle = (cycle + 1) % 10
- if cycle == 0:
- notify()
- await asyncio.sleep(1)
+ for server in self.servers:
+ server.close()
+ await server.wait_closed()
+ loop.stop()
|
{"golden_diff": "diff --git a/uvicorn/worker.py b/uvicorn/worker.py\n--- a/uvicorn/worker.py\n+++ b/uvicorn/worker.py\n@@ -1,5 +1,7 @@\n import asyncio\n import functools\n+import signal\n+import sys\n \n import uvloop\n \n@@ -19,6 +21,11 @@\n * `httptools` as the HTTP request parser.\n \"\"\"\n \n+ def __init__(self, *args, **kwargs): # pragma: no cover\n+ super().__init__(*args, **kwargs)\n+ self.servers = []\n+ self.exit_code = 0\n+\n def init_process(self):\n # Close any existing event loop before setting a\n # new policy.\n@@ -34,8 +41,45 @@\n def run(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self.create_servers(loop))\n- loop.create_task(tick(loop, self.notify))\n+ loop.create_task(self.tick(loop))\n loop.run_forever()\n+ sys.exit(self.exit_code)\n+\n+ def init_signals(self):\n+ # Set up signals through the event loop API.\n+ loop = asyncio.get_event_loop()\n+\n+ loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,\n+ signal.SIGQUIT, None)\n+\n+ loop.add_signal_handler(signal.SIGTERM, self.handle_exit,\n+ signal.SIGTERM, None)\n+\n+ loop.add_signal_handler(signal.SIGINT, self.handle_quit,\n+ signal.SIGINT, None)\n+\n+ loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,\n+ signal.SIGWINCH, None)\n+\n+ loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,\n+ signal.SIGUSR1, None)\n+\n+ loop.add_signal_handler(signal.SIGABRT, self.handle_abort,\n+ signal.SIGABRT, None)\n+\n+ # Don't let SIGTERM and SIGUSR1 disturb active requests\n+ # by interrupting system calls\n+ signal.siginterrupt(signal.SIGTERM, False)\n+ signal.siginterrupt(signal.SIGUSR1, False)\n+\n+ def handle_quit(self, sig, frame):\n+ self.alive = False\n+ self.cfg.worker_int(self)\n+\n+ def handle_abort(self, sig, frame):\n+ self.alive = False\n+ self.exit_code = 1\n+ self.cfg.worker_abort(self)\n \n async def create_servers(self, loop):\n cfg = self.cfg\n@@ -46,14 +90,19 @@\n http.HttpProtocol,\n consumer=consumer, loop=loop, sock=sock, cfg=cfg\n )\n- await loop.create_server(protocol, sock=sock)\n+ server = await loop.create_server(protocol, sock=sock)\n+ self.servers.append(server)\n \n+ async def tick(self, loop):\n+ cycle = 0\n+ while self.alive:\n+ http.set_time_and_date()\n+ cycle = (cycle + 1) % 10\n+ if cycle == 0:\n+ self.notify()\n+ await asyncio.sleep(1)\n \n-async def tick(loop, notify):\n- cycle = 0\n- while True:\n- http.set_time_and_date()\n- cycle = (cycle + 1) % 10\n- if cycle == 0:\n- notify()\n- await asyncio.sleep(1)\n+ for server in self.servers:\n+ server.close()\n+ await server.wait_closed()\n+ loop.stop()\n", "issue": "Signal handlers\nIn the worker class\n", "before_files": [{"content": "import asyncio\nimport functools\n\nimport uvloop\n\nfrom gunicorn.workers.base import Worker\nfrom uvicorn.protocols import http\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n\n We use a couple of packages from MagicStack in order to achieve an\n extremely high-throughput and low-latency implementation:\n\n * `uvloop` as the event loop policy.\n * `httptools` as the HTTP request parser.\n \"\"\"\n\n def init_process(self):\n # Close any existing event loop before setting a\n # new policy.\n asyncio.get_event_loop().close()\n\n # Setup uvloop policy, so that every\n # asyncio.get_event_loop() will create an instance\n # of uvloop event loop.\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n super().init_process()\n\n def run(self):\n loop = 
asyncio.get_event_loop()\n loop.create_task(self.create_servers(loop))\n loop.create_task(tick(loop, self.notify))\n loop.run_forever()\n\n async def create_servers(self, loop):\n cfg = self.cfg\n consumer = self.wsgi\n\n for sock in self.sockets:\n protocol = functools.partial(\n http.HttpProtocol,\n consumer=consumer, loop=loop, sock=sock, cfg=cfg\n )\n await loop.create_server(protocol, sock=sock)\n\n\nasync def tick(loop, notify):\n cycle = 0\n while True:\n http.set_time_and_date()\n cycle = (cycle + 1) % 10\n if cycle == 0:\n notify()\n await asyncio.sleep(1)\n", "path": "uvicorn/worker.py"}], "after_files": [{"content": "import asyncio\nimport functools\nimport signal\nimport sys\n\nimport uvloop\n\nfrom gunicorn.workers.base import Worker\nfrom uvicorn.protocols import http\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n\n We use a couple of packages from MagicStack in order to achieve an\n extremely high-throughput and low-latency implementation:\n\n * `uvloop` as the event loop policy.\n * `httptools` as the HTTP request parser.\n \"\"\"\n\n def __init__(self, *args, **kwargs): # pragma: no cover\n super().__init__(*args, **kwargs)\n self.servers = []\n self.exit_code = 0\n\n def init_process(self):\n # Close any existing event loop before setting a\n # new policy.\n asyncio.get_event_loop().close()\n\n # Setup uvloop policy, so that every\n # asyncio.get_event_loop() will create an instance\n # of uvloop event loop.\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n super().init_process()\n\n def run(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self.create_servers(loop))\n loop.create_task(self.tick(loop))\n loop.run_forever()\n sys.exit(self.exit_code)\n\n def init_signals(self):\n # Set up signals through the event loop API.\n loop = asyncio.get_event_loop()\n\n loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,\n signal.SIGQUIT, None)\n\n loop.add_signal_handler(signal.SIGTERM, self.handle_exit,\n signal.SIGTERM, None)\n\n loop.add_signal_handler(signal.SIGINT, self.handle_quit,\n signal.SIGINT, None)\n\n loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,\n signal.SIGWINCH, None)\n\n loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,\n signal.SIGUSR1, None)\n\n loop.add_signal_handler(signal.SIGABRT, self.handle_abort,\n signal.SIGABRT, None)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_quit(self, sig, frame):\n self.alive = False\n self.cfg.worker_int(self)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.exit_code = 1\n self.cfg.worker_abort(self)\n\n async def create_servers(self, loop):\n cfg = self.cfg\n consumer = self.wsgi\n\n for sock in self.sockets:\n protocol = functools.partial(\n http.HttpProtocol,\n consumer=consumer, loop=loop, sock=sock, cfg=cfg\n )\n server = await loop.create_server(protocol, sock=sock)\n self.servers.append(server)\n\n async def tick(self, loop):\n cycle = 0\n while self.alive:\n http.set_time_and_date()\n cycle = (cycle + 1) % 10\n if cycle == 0:\n self.notify()\n await asyncio.sleep(1)\n\n for server in self.servers:\n server.close()\n await server.wait_closed()\n loop.stop()\n", "path": "uvicorn/worker.py"}]}
| 753 | 766 |
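Editor's note: the patch above routes gunicorn's worker signals through `loop.add_signal_handler`. The standalone sketch below shows only that asyncio API in isolation (Unix-only); the handler name and shutdown logic are illustrative, not gunicorn's actual worker hooks.

```python
import asyncio
import signal


async def main():
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()

    def handle_term():
        # Graceful shutdown path, loosely analogous to the worker's handle_quit.
        stop.set()

    # add_signal_handler is only available on Unix event loops.
    loop.add_signal_handler(signal.SIGTERM, handle_term)
    loop.add_signal_handler(signal.SIGINT, handle_term)

    await stop.wait()


if __name__ == "__main__":
    asyncio.run(main())
```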
gh_patches_debug_2047
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-7286
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] aws-c-event-stream/0.2.7: conflicting openssl versions
```
ERROR: Conflict in s2n/1.0.11:
's2n/1.0.11' requires 'openssl/1.1.1k' while 'aws-c-cal/0.5.11' requires 'openssl/1.1.1l'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
seems like it was introduced by #7260
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **aws-c-event-stream/0.2.7**
* Conan version: **conan 1.39.0**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++
compiler.version=7
os=Linux
os_build=Linux
[options]
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
conan install --build missing aws-c-event-stream/0.2.7@
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/s2n/all/conanfile.py`
Content:
```
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5 required_conan_version = ">=1.33.0"
6
7 class S2n(ConanFile):
8 name = "s2n"
9 description = "An implementation of the TLS/SSL protocols"
10 topics = ("conan", "aws", "amazon", "cloud", )
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/aws/s2n-tls"
13 license = "Apache-2.0",
14 exports_sources = "CMakeLists.txt"
15 generators = "cmake", "cmake_find_package"
16 settings = "os", "arch", "compiler", "build_type"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 }
21 default_options = {
22 "shared": False,
23 "fPIC": True,
24 }
25
26 _cmake = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def configure(self):
33 if self.options.shared:
34 del self.options.fPIC
35 del self.settings.compiler.cppstd
36 del self.settings.compiler.libcxx
37
38 def requirements(self):
39 self.requires("openssl/1.1.1k")
40
41 def source(self):
42 tools.get(**self.conan_data["sources"][self.version],
43 destination=self._source_subfolder, strip_root=True)
44
45 def validate(self):
46 if self.settings.os == "Windows":
47 raise ConanInvalidConfiguration("Not supported (yet)")
48
49 def _configure_cmake(self):
50 if self._cmake:
51 return self._cmake
52 self._cmake = CMake(self)
53 self._cmake.definitions["BUILD_TESTING"] = False
54 self._cmake.definitions["UNSAFE_TREAT_WARNINGS_AS_ERRORS"] = False
55 self._cmake.configure()
56 return self._cmake
57
58 def build(self):
59 cmake = self._configure_cmake()
60 cmake.build()
61
62 def package(self):
63 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
64 cmake = self._configure_cmake()
65 cmake.install()
66 tools.rmdir(os.path.join(self.package_folder, "lib", "s2n"))
67
68 def package_info(self):
69 self.cpp_info.filenames["cmake_find_package"] = "s2n"
70 self.cpp_info.filenames["cmake_find_package_multi"] = "s2n"
71 self.cpp_info.names["cmake_find_package"] = "AWS"
72 self.cpp_info.names["cmake_find_package_multi"] = "AWS"
73 self.cpp_info.components["s2n-lib"].names["cmake_find_package"] = "s2n"
74 self.cpp_info.components["s2n-lib"].names["cmake_find_package_multi"] = "s2n"
75 self.cpp_info.components["s2n-lib"].libs = ["s2n"]
76 self.cpp_info.components["s2n-lib"].requires = ["openssl::crypto"]
77 if self.settings.os in ("FreeBSD", "Linux"):
78 self.cpp_info.components["s2n-lib"].system_libs = ["m", "pthread"]
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/s2n/all/conanfile.py b/recipes/s2n/all/conanfile.py
--- a/recipes/s2n/all/conanfile.py
+++ b/recipes/s2n/all/conanfile.py
@@ -36,7 +36,7 @@
del self.settings.compiler.libcxx
def requirements(self):
- self.requires("openssl/1.1.1k")
+ self.requires("openssl/1.1.1l")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
|
{"golden_diff": "diff --git a/recipes/s2n/all/conanfile.py b/recipes/s2n/all/conanfile.py\n--- a/recipes/s2n/all/conanfile.py\n+++ b/recipes/s2n/all/conanfile.py\n@@ -36,7 +36,7 @@\n del self.settings.compiler.libcxx\n \n def requirements(self):\n- self.requires(\"openssl/1.1.1k\")\n+ self.requires(\"openssl/1.1.1l\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n", "issue": "[package] aws-c-event-stream/0.2.7: conflicting openssl versions\n```\r\nERROR: Conflict in s2n/1.0.11:\r\n 's2n/1.0.11' requires 'openssl/1.1.1k' while 'aws-c-cal/0.5.11' requires 'openssl/1.1.1l'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\nseems like it was introduced by #7260 \r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **aws-c-event-stream/0.2.7**\r\n * Conan version: **conan 1.39.0**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++\r\ncompiler.version=7\r\nos=Linux\r\nos_build=Linux\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nconan install --build missing aws-c-event-stream/0.2.7@\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\nclass S2n(ConanFile):\n name = \"s2n\"\n description = \"An implementation of the TLS/SSL protocols\"\n topics = (\"conan\", \"aws\", \"amazon\", \"cloud\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/aws/s2n-tls\"\n license = \"Apache-2.0\",\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def requirements(self):\n self.requires(\"openssl/1.1.1k\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Not supported (yet)\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"UNSAFE_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"s2n\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.names[\"cmake_find_package\"] = \"AWS\"\n 
self.cpp_info.names[\"cmake_find_package_multi\"] = \"AWS\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].libs = [\"s2n\"]\n self.cpp_info.components[\"s2n-lib\"].requires = [\"openssl::crypto\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.components[\"s2n-lib\"].system_libs = [\"m\", \"pthread\"]\n", "path": "recipes/s2n/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\nclass S2n(ConanFile):\n name = \"s2n\"\n description = \"An implementation of the TLS/SSL protocols\"\n topics = (\"conan\", \"aws\", \"amazon\", \"cloud\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/aws/s2n-tls\"\n license = \"Apache-2.0\",\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def requirements(self):\n self.requires(\"openssl/1.1.1l\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Not supported (yet)\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"UNSAFE_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"s2n\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.names[\"cmake_find_package\"] = \"AWS\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"AWS\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].libs = [\"s2n\"]\n self.cpp_info.components[\"s2n-lib\"].requires = [\"openssl::crypto\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.components[\"s2n-lib\"].system_libs = [\"m\", \"pthread\"]\n", "path": "recipes/s2n/all/conanfile.py"}]}
| 1,401 | 125 |
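The golden diff above resolves the conflict by bumping the `s2n` recipe to the same `openssl/1.1.1l` that `aws-c-cal` already requires. Until such a recipe fix is published, the error message's own suggestion (override the package in the root consumer) is the usual workaround. The snippet below is a sketch of that workaround for a hypothetical consumer recipe, assuming Conan 1.x semantics where a root-level requirement marked `override=True` forces a single openssl version across the dependency graph.

```python
from conans import ConanFile


class ConsumerConan(ConanFile):
    # Hypothetical consumer recipe, not part of the record above.
    settings = "os", "arch", "compiler", "build_type"

    def requirements(self):
        self.requires("aws-c-event-stream/0.2.7")
        # Force one openssl version for every transitive requirement.
        self.requires("openssl/1.1.1l", override=True)
```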
gh_patches_debug_3768
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-1794
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
QubitOrder.explicit fails when given an iterator
Also affects `QubitOrder.as_qubit_order()`. I believe the issue is on line 77 of cirq/ops/qubit_order.py.
Example:
```
cirq.QubitOrder.as_qubit_order(reversed(cirq.LineQubit.range(5))).order_for(
(cirq.LineQubit(3),))
```
The example raises `ValueError: Unexpected extra qubits: {cirq.LineQubit(3)}.` because `reversed()` is an iterator and QubitOrder tried to iterate it twice.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/ops/qubit_order.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from typing import (
17 Any,
18 Callable,
19 Iterable,
20 Optional,
21 Tuple,
22 TypeVar,
23 TYPE_CHECKING,
24 )
25
26 import collections
27
28 from cirq.ops import raw_types
29
30 if TYPE_CHECKING:
31 # pylint: disable=unused-import
32 from cirq.ops import qubit_order_or_list
33
34
35 TInternalQubit = TypeVar('TInternalQubit')
36 TExternalQubit = TypeVar('TExternalQubit')
37
38
39 class QubitOrder:
40 """Defines the kronecker product order of qubits."""
41
42 def __init__(self, explicit_func: Callable[[Iterable[raw_types.Qid]],
43 Tuple[raw_types.Qid, ...]]
44 ) -> None:
45 self._explicit_func = explicit_func
46
47 DEFAULT = None # type: QubitOrder
48 """A basis that orders qubits in the same way that calling `sorted` does.
49
50 Specifically, qubits are ordered first by their type name and then by
51 whatever comparison value qubits of a given type provide (e.g. for LineQubit
52 it is the x coordinate of the qubit).
53 """
54
55 @staticmethod
56 def explicit(fixed_qubits: Iterable[raw_types.Qid],
57 fallback: Optional['QubitOrder']=None) -> 'QubitOrder':
58 """A basis that contains exactly the given qubits in the given order.
59
60 Args:
61 fixed_qubits: The qubits in basis order.
62 fallback: A fallback order to use for extra qubits not in the
63 fixed_qubits list. Extra qubits will always come after the
64 fixed_qubits, but will be ordered based on the fallback. If no
65 fallback is specified, a ValueError is raised when extra qubits
66 are specified.
67
68 Returns:
69 A Basis instance that forces the given qubits in the given order.
70 """
71 result = tuple(fixed_qubits)
72 if len(set(result)) < len(result):
73 raise ValueError(
74 'Qubits appear in fixed_order twice: {}.'.format(result))
75
76 def func(qubits):
77 remaining = set(qubits) - set(fixed_qubits)
78 if not remaining:
79 return result
80 if not fallback:
81 raise ValueError(
82 'Unexpected extra qubits: {}.'.format(remaining))
83 return result + fallback.order_for(remaining)
84
85 return QubitOrder(func)
86
87 @staticmethod
88 def sorted_by(key: Callable[[raw_types.Qid], Any]) -> 'QubitOrder':
89 """A basis that orders qubits ascending based on a key function.
90
91 Args:
92 key: A function that takes a qubit and returns a key value. The
93 basis will be ordered ascending according to these key values.
94
95
96 Returns:
97 A basis that orders qubits ascending based on a key function.
98 """
99 return QubitOrder(lambda qubits: tuple(sorted(qubits, key=key)))
100
101 def order_for(self, qubits: Iterable[raw_types.Qid]
102 ) -> Tuple[raw_types.Qid, ...]:
103 """Returns a qubit tuple ordered corresponding to the basis.
104
105 Args:
106 qubits: Qubits that should be included in the basis. (Additional
107 qubits may be added into the output by the basis.)
108
109 Returns:
110 A tuple of qubits in the same order that their single-qubit
111 matrices would be passed into `np.kron` when producing a matrix for
112 the entire system.
113 """
114 return self._explicit_func(qubits)
115
116 @staticmethod
117 def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
118 ) -> 'QubitOrder':
119 """Converts a value into a basis.
120
121 Args:
122 val: An iterable or a basis.
123
124 Returns:
125 The basis implied by the value.
126 """
127 if isinstance(val, collections.Iterable):
128 return QubitOrder.explicit(val)
129 if isinstance(val, QubitOrder):
130 return val
131 raise ValueError(
132 "Don't know how to interpret <{}> as a Basis.".format(val))
133
134 def map(self,
135 internalize: Callable[[TExternalQubit], TInternalQubit],
136 externalize: Callable[[TInternalQubit], TExternalQubit]
137 ) -> 'QubitOrder':
138 """Transforms the Basis so that it applies to wrapped qubits.
139
140 Args:
141 externalize: Converts an internal qubit understood by the underlying
142 basis into an external qubit understood by the caller.
143 internalize: Converts an external qubit understood by the caller
144 into an internal qubit understood by the underlying basis.
145
146 Returns:
147 A basis that transforms qubits understood by the caller into qubits
148 understood by an underlying basis, uses that to order the qubits,
149 then wraps the ordered qubits back up for the caller.
150 """
151
152 def func(qubits):
153 unwrapped_qubits = [internalize(q) for q in qubits]
154 unwrapped_result = self.order_for(unwrapped_qubits)
155 return tuple(externalize(q) for q in unwrapped_result)
156
157 return QubitOrder(func)
158
159
160 QubitOrder.DEFAULT = QubitOrder.sorted_by(lambda v: v)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/ops/qubit_order.py b/cirq/ops/qubit_order.py
--- a/cirq/ops/qubit_order.py
+++ b/cirq/ops/qubit_order.py
@@ -74,7 +74,7 @@
'Qubits appear in fixed_order twice: {}.'.format(result))
def func(qubits):
- remaining = set(qubits) - set(fixed_qubits)
+ remaining = set(qubits) - set(result)
if not remaining:
return result
if not fallback:
|
{"golden_diff": "diff --git a/cirq/ops/qubit_order.py b/cirq/ops/qubit_order.py\n--- a/cirq/ops/qubit_order.py\n+++ b/cirq/ops/qubit_order.py\n@@ -74,7 +74,7 @@\n 'Qubits appear in fixed_order twice: {}.'.format(result))\n \n def func(qubits):\n- remaining = set(qubits) - set(fixed_qubits)\n+ remaining = set(qubits) - set(result)\n if not remaining:\n return result\n if not fallback:\n", "issue": "QubitOrder.explicit fails when given an iterator\nAlso affects `QubitOrder.as_qubit_order()`. I believe the issue is on line 77 of cirq/ops/qubit_order.py.\r\n\r\nExample:\r\n```\r\ncirq.QubitOrder.as_qubit_order(reversed(cirq.LineQubit.range(5))).order_for(\r\n (cirq.LineQubit(3),))\r\n```\r\n\r\nThe example raises `ValueError: Unexpected extra qubits: {cirq.LineQubit(3)}.` because `reversed()` is an iterator and QubitOrder tried to iterate it twice.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import (\n Any,\n Callable,\n Iterable,\n Optional,\n Tuple,\n TypeVar,\n TYPE_CHECKING,\n)\n\nimport collections\n\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from cirq.ops import qubit_order_or_list\n\n\nTInternalQubit = TypeVar('TInternalQubit')\nTExternalQubit = TypeVar('TExternalQubit')\n\n\nclass QubitOrder:\n \"\"\"Defines the kronecker product order of qubits.\"\"\"\n\n def __init__(self, explicit_func: Callable[[Iterable[raw_types.Qid]],\n Tuple[raw_types.Qid, ...]]\n ) -> None:\n self._explicit_func = explicit_func\n\n DEFAULT = None # type: QubitOrder\n \"\"\"A basis that orders qubits in the same way that calling `sorted` does.\n\n Specifically, qubits are ordered first by their type name and then by\n whatever comparison value qubits of a given type provide (e.g. for LineQubit\n it is the x coordinate of the qubit).\n \"\"\"\n\n @staticmethod\n def explicit(fixed_qubits: Iterable[raw_types.Qid],\n fallback: Optional['QubitOrder']=None) -> 'QubitOrder':\n \"\"\"A basis that contains exactly the given qubits in the given order.\n\n Args:\n fixed_qubits: The qubits in basis order.\n fallback: A fallback order to use for extra qubits not in the\n fixed_qubits list. Extra qubits will always come after the\n fixed_qubits, but will be ordered based on the fallback. 
If no\n fallback is specified, a ValueError is raised when extra qubits\n are specified.\n\n Returns:\n A Basis instance that forces the given qubits in the given order.\n \"\"\"\n result = tuple(fixed_qubits)\n if len(set(result)) < len(result):\n raise ValueError(\n 'Qubits appear in fixed_order twice: {}.'.format(result))\n\n def func(qubits):\n remaining = set(qubits) - set(fixed_qubits)\n if not remaining:\n return result\n if not fallback:\n raise ValueError(\n 'Unexpected extra qubits: {}.'.format(remaining))\n return result + fallback.order_for(remaining)\n\n return QubitOrder(func)\n\n @staticmethod\n def sorted_by(key: Callable[[raw_types.Qid], Any]) -> 'QubitOrder':\n \"\"\"A basis that orders qubits ascending based on a key function.\n\n Args:\n key: A function that takes a qubit and returns a key value. The\n basis will be ordered ascending according to these key values.\n\n\n Returns:\n A basis that orders qubits ascending based on a key function.\n \"\"\"\n return QubitOrder(lambda qubits: tuple(sorted(qubits, key=key)))\n\n def order_for(self, qubits: Iterable[raw_types.Qid]\n ) -> Tuple[raw_types.Qid, ...]:\n \"\"\"Returns a qubit tuple ordered corresponding to the basis.\n\n Args:\n qubits: Qubits that should be included in the basis. (Additional\n qubits may be added into the output by the basis.)\n\n Returns:\n A tuple of qubits in the same order that their single-qubit\n matrices would be passed into `np.kron` when producing a matrix for\n the entire system.\n \"\"\"\n return self._explicit_func(qubits)\n\n @staticmethod\n def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'\n ) -> 'QubitOrder':\n \"\"\"Converts a value into a basis.\n\n Args:\n val: An iterable or a basis.\n\n Returns:\n The basis implied by the value.\n \"\"\"\n if isinstance(val, collections.Iterable):\n return QubitOrder.explicit(val)\n if isinstance(val, QubitOrder):\n return val\n raise ValueError(\n \"Don't know how to interpret <{}> as a Basis.\".format(val))\n\n def map(self,\n internalize: Callable[[TExternalQubit], TInternalQubit],\n externalize: Callable[[TInternalQubit], TExternalQubit]\n ) -> 'QubitOrder':\n \"\"\"Transforms the Basis so that it applies to wrapped qubits.\n\n Args:\n externalize: Converts an internal qubit understood by the underlying\n basis into an external qubit understood by the caller.\n internalize: Converts an external qubit understood by the caller\n into an internal qubit understood by the underlying basis.\n\n Returns:\n A basis that transforms qubits understood by the caller into qubits\n understood by an underlying basis, uses that to order the qubits,\n then wraps the ordered qubits back up for the caller.\n \"\"\"\n\n def func(qubits):\n unwrapped_qubits = [internalize(q) for q in qubits]\n unwrapped_result = self.order_for(unwrapped_qubits)\n return tuple(externalize(q) for q in unwrapped_result)\n\n return QubitOrder(func)\n\n\nQubitOrder.DEFAULT = QubitOrder.sorted_by(lambda v: v)\n", "path": "cirq/ops/qubit_order.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import (\n Any,\n Callable,\n Iterable,\n Optional,\n Tuple,\n TypeVar,\n TYPE_CHECKING,\n)\n\nimport collections\n\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from cirq.ops import qubit_order_or_list\n\n\nTInternalQubit = TypeVar('TInternalQubit')\nTExternalQubit = TypeVar('TExternalQubit')\n\n\nclass QubitOrder:\n \"\"\"Defines the kronecker product order of qubits.\"\"\"\n\n def __init__(self, explicit_func: Callable[[Iterable[raw_types.Qid]],\n Tuple[raw_types.Qid, ...]]\n ) -> None:\n self._explicit_func = explicit_func\n\n DEFAULT = None # type: QubitOrder\n \"\"\"A basis that orders qubits in the same way that calling `sorted` does.\n\n Specifically, qubits are ordered first by their type name and then by\n whatever comparison value qubits of a given type provide (e.g. for LineQubit\n it is the x coordinate of the qubit).\n \"\"\"\n\n @staticmethod\n def explicit(fixed_qubits: Iterable[raw_types.Qid],\n fallback: Optional['QubitOrder']=None) -> 'QubitOrder':\n \"\"\"A basis that contains exactly the given qubits in the given order.\n\n Args:\n fixed_qubits: The qubits in basis order.\n fallback: A fallback order to use for extra qubits not in the\n fixed_qubits list. Extra qubits will always come after the\n fixed_qubits, but will be ordered based on the fallback. If no\n fallback is specified, a ValueError is raised when extra qubits\n are specified.\n\n Returns:\n A Basis instance that forces the given qubits in the given order.\n \"\"\"\n result = tuple(fixed_qubits)\n if len(set(result)) < len(result):\n raise ValueError(\n 'Qubits appear in fixed_order twice: {}.'.format(result))\n\n def func(qubits):\n remaining = set(qubits) - set(result)\n if not remaining:\n return result\n if not fallback:\n raise ValueError(\n 'Unexpected extra qubits: {}.'.format(remaining))\n return result + fallback.order_for(remaining)\n\n return QubitOrder(func)\n\n @staticmethod\n def sorted_by(key: Callable[[raw_types.Qid], Any]) -> 'QubitOrder':\n \"\"\"A basis that orders qubits ascending based on a key function.\n\n Args:\n key: A function that takes a qubit and returns a key value. The\n basis will be ordered ascending according to these key values.\n\n\n Returns:\n A basis that orders qubits ascending based on a key function.\n \"\"\"\n return QubitOrder(lambda qubits: tuple(sorted(qubits, key=key)))\n\n def order_for(self, qubits: Iterable[raw_types.Qid]\n ) -> Tuple[raw_types.Qid, ...]:\n \"\"\"Returns a qubit tuple ordered corresponding to the basis.\n\n Args:\n qubits: Qubits that should be included in the basis. 
(Additional\n qubits may be added into the output by the basis.)\n\n Returns:\n A tuple of qubits in the same order that their single-qubit\n matrices would be passed into `np.kron` when producing a matrix for\n the entire system.\n \"\"\"\n return self._explicit_func(qubits)\n\n @staticmethod\n def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'\n ) -> 'QubitOrder':\n \"\"\"Converts a value into a basis.\n\n Args:\n val: An iterable or a basis.\n\n Returns:\n The basis implied by the value.\n \"\"\"\n if isinstance(val, collections.Iterable):\n return QubitOrder.explicit(val)\n if isinstance(val, QubitOrder):\n return val\n raise ValueError(\n \"Don't know how to interpret <{}> as a Basis.\".format(val))\n\n def map(self,\n internalize: Callable[[TExternalQubit], TInternalQubit],\n externalize: Callable[[TInternalQubit], TExternalQubit]\n ) -> 'QubitOrder':\n \"\"\"Transforms the Basis so that it applies to wrapped qubits.\n\n Args:\n externalize: Converts an internal qubit understood by the underlying\n basis into an external qubit understood by the caller.\n internalize: Converts an external qubit understood by the caller\n into an internal qubit understood by the underlying basis.\n\n Returns:\n A basis that transforms qubits understood by the caller into qubits\n understood by an underlying basis, uses that to order the qubits,\n then wraps the ordered qubits back up for the caller.\n \"\"\"\n\n def func(qubits):\n unwrapped_qubits = [internalize(q) for q in qubits]\n unwrapped_result = self.order_for(unwrapped_qubits)\n return tuple(externalize(q) for q in unwrapped_result)\n\n return QubitOrder(func)\n\n\nQubitOrder.DEFAULT = QubitOrder.sorted_by(lambda v: v)\n", "path": "cirq/ops/qubit_order.py"}]}
| 2,010 | 119 |
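The root cause described in this record is a one-shot iterator pitfall: `QubitOrder.explicit` materialises `fixed_qubits` into `result`, but `func` then iterates the original iterable a second time, when it is already exhausted. The standalone sketch below reproduces the behaviour with plain Python (no cirq dependency) and mirrors why the golden diff switches the second pass to `set(result)`.

```python
qubit_iter = reversed(range(5))       # stand-in for reversed(cirq.LineQubit.range(5))
result = tuple(qubit_iter)            # first pass materialises: (4, 3, 2, 1, 0)

requested = {3}                       # qubits actually passed to order_for()
remaining_buggy = requested - set(qubit_iter)   # iterator exhausted -> {3}
remaining_fixed = requested - set(result)       # materialised tuple -> set()

print(remaining_buggy)   # {3}   -> spurious "Unexpected extra qubits" in the old code
print(remaining_fixed)   # set() -> no error once the fix reuses the tuple
```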
gh_patches_debug_8659
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1745
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RuntimeError: Set changed size during iteration
Python Version: 3.7.7
Error:
```
RuntimeError: Set changed size during iteration
File "django/core/handlers/wsgi.py", line 141, in __call__
response = self.get_response(request)
File "ddtrace/contrib/django/patch.py", line 82, in wrapper
return func(mod, pin, wrapped, instance, args, kwargs)
File "ddtrace/contrib/django/patch.py", line 406, in traced_get_response
span_type=SpanTypes.HTTP,
File "ddtrace/tracer.py", line 638, in trace
span_type=span_type,
File "ddtrace/tracer.py", line 389, in start_span
new_ctx = self._check_new_process()
File "ddtrace/tracer.py", line 570, in _check_new_process
self._update_dogstatsd_constant_tags()
File "ddtrace/tracer.py", line 525, in _update_dogstatsd_constant_tags
for k, v in RuntimeTags()
File "ddtrace/tracer.py", line 524, in <listcomp>
'{}:{}'.format(k, v)
File "ddtrace/internal/runtime/runtime_metrics.py", line 29, in <genexpr>
collected = (collector.collect(self._enabled) for collector in self._collectors)
File "ddtrace/internal/runtime/collector.py", line 67, in collect
self.value = self.collect_fn(keys)
File "ddtrace/internal/runtime/tag_collectors.py", line 25, in collect_fn
tags = [(SERVICE, service) for service in ddtrace.tracer._services]
File "ddtrace/internal/runtime/tag_collectors.py", line 25, in <listcomp>
tags = [(SERVICE, service) for service in ddtrace.tracer._services]
```
### Which version of dd-trace-py are you using?
```
0.42.0
```
### Which version of the libraries are you using?
```
django==2.2.14
ddtrace==0.42.0
gunicorn==20.0.4
```
### How can we reproduce your problem?
It's not clear if there's anything specific about the WSGI requests that triggers this condition.
### What is the result that you get?
-
### What is the result that you expected?
-
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/internal/runtime/tag_collectors.py`
Content:
```
1 from .collector import ValueCollector
2 from .constants import (
3 SERVICE,
4 LANG_INTERPRETER,
5 LANG_VERSION,
6 LANG,
7 TRACER_VERSION,
8 )
9 from ...constants import ENV_KEY
10
11
12 class RuntimeTagCollector(ValueCollector):
13 periodic = False
14 value = []
15
16
17 class TracerTagCollector(RuntimeTagCollector):
18 """Tag collector for the ddtrace Tracer"""
19
20 required_modules = ["ddtrace"]
21
22 def collect_fn(self, keys):
23 ddtrace = self.modules.get("ddtrace")
24 tags = [(SERVICE, service) for service in ddtrace.tracer._services]
25 if ENV_KEY in ddtrace.tracer.tags:
26 tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))
27 return tags
28
29
30 class PlatformTagCollector(RuntimeTagCollector):
31 """Tag collector for the Python interpreter implementation.
32
33 Tags collected:
34 - ``lang_interpreter``:
35
36 * For CPython this is 'CPython'.
37 * For Pypy this is ``PyPy``
38 * For Jython this is ``Jython``
39
40 - `lang_version``, eg ``2.7.10``
41 - ``lang`` e.g. ``Python``
42 - ``tracer_version`` e.g. ``0.29.0``
43
44 """
45
46 required_modules = ("platform", "ddtrace")
47
48 def collect_fn(self, keys):
49 platform = self.modules.get("platform")
50 ddtrace = self.modules.get("ddtrace")
51 tags = [
52 (LANG, "python"),
53 (LANG_INTERPRETER, platform.python_implementation()),
54 (LANG_VERSION, platform.python_version()),
55 (TRACER_VERSION, ddtrace.__version__),
56 ]
57 return tags
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/internal/runtime/tag_collectors.py b/ddtrace/internal/runtime/tag_collectors.py
--- a/ddtrace/internal/runtime/tag_collectors.py
+++ b/ddtrace/internal/runtime/tag_collectors.py
@@ -21,7 +21,8 @@
def collect_fn(self, keys):
ddtrace = self.modules.get("ddtrace")
- tags = [(SERVICE, service) for service in ddtrace.tracer._services]
+ # make sure to copy _services to avoid RuntimeError: Set changed size during iteration
+ tags = [(SERVICE, service) for service in list(ddtrace.tracer._services)]
if ENV_KEY in ddtrace.tracer.tags:
tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))
return tags
|
{"golden_diff": "diff --git a/ddtrace/internal/runtime/tag_collectors.py b/ddtrace/internal/runtime/tag_collectors.py\n--- a/ddtrace/internal/runtime/tag_collectors.py\n+++ b/ddtrace/internal/runtime/tag_collectors.py\n@@ -21,7 +21,8 @@\n \n def collect_fn(self, keys):\n ddtrace = self.modules.get(\"ddtrace\")\n- tags = [(SERVICE, service) for service in ddtrace.tracer._services]\n+ # make sure to copy _services to avoid RuntimeError: Set changed size during iteration\n+ tags = [(SERVICE, service) for service in list(ddtrace.tracer._services)]\n if ENV_KEY in ddtrace.tracer.tags:\n tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))\n return tags\n", "issue": "RuntimeError: Set changed size during iteration\nPython Version: 3.7.7\r\n\r\nError:\r\n```\r\nRuntimeError: Set changed size during iteration\r\n File \"django/core/handlers/wsgi.py\", line 141, in __call__\r\n response = self.get_response(request)\r\n File \"ddtrace/contrib/django/patch.py\", line 82, in wrapper\r\n return func(mod, pin, wrapped, instance, args, kwargs)\r\n File \"ddtrace/contrib/django/patch.py\", line 406, in traced_get_response\r\n span_type=SpanTypes.HTTP,\r\n File \"ddtrace/tracer.py\", line 638, in trace\r\n span_type=span_type,\r\n File \"ddtrace/tracer.py\", line 389, in start_span\r\n new_ctx = self._check_new_process()\r\n File \"ddtrace/tracer.py\", line 570, in _check_new_process\r\n self._update_dogstatsd_constant_tags()\r\n File \"ddtrace/tracer.py\", line 525, in _update_dogstatsd_constant_tags\r\n for k, v in RuntimeTags()\r\n File \"ddtrace/tracer.py\", line 524, in <listcomp>\r\n '{}:{}'.format(k, v)\r\n File \"ddtrace/internal/runtime/runtime_metrics.py\", line 29, in <genexpr>\r\n collected = (collector.collect(self._enabled) for collector in self._collectors)\r\n File \"ddtrace/internal/runtime/collector.py\", line 67, in collect\r\n self.value = self.collect_fn(keys)\r\n File \"ddtrace/internal/runtime/tag_collectors.py\", line 25, in collect_fn\r\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\r\n File \"ddtrace/internal/runtime/tag_collectors.py\", line 25, in <listcomp>\r\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\n```\r\n0.42.0\r\n```\r\n\r\n### Which version of the libraries are you using?\r\n```\r\ndjango==2.2.14\r\nddtrace==0.42.0\r\ngunicorn==20.0.4\r\n```\r\n\r\n### How can we reproduce your problem?\r\nIt's not clear if there's anything specific about the WSGI requests that triggers this condition. 
\r\n\r\n### What is the result that you get?\r\n-\r\n\r\n### What is the result that you expected?\r\n-\n", "before_files": [{"content": "from .collector import ValueCollector\nfrom .constants import (\n SERVICE,\n LANG_INTERPRETER,\n LANG_VERSION,\n LANG,\n TRACER_VERSION,\n)\nfrom ...constants import ENV_KEY\n\n\nclass RuntimeTagCollector(ValueCollector):\n periodic = False\n value = []\n\n\nclass TracerTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the ddtrace Tracer\"\"\"\n\n required_modules = [\"ddtrace\"]\n\n def collect_fn(self, keys):\n ddtrace = self.modules.get(\"ddtrace\")\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\n if ENV_KEY in ddtrace.tracer.tags:\n tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))\n return tags\n\n\nclass PlatformTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the Python interpreter implementation.\n\n Tags collected:\n - ``lang_interpreter``:\n\n * For CPython this is 'CPython'.\n * For Pypy this is ``PyPy``\n * For Jython this is ``Jython``\n\n - `lang_version``, eg ``2.7.10``\n - ``lang`` e.g. ``Python``\n - ``tracer_version`` e.g. ``0.29.0``\n\n \"\"\"\n\n required_modules = (\"platform\", \"ddtrace\")\n\n def collect_fn(self, keys):\n platform = self.modules.get(\"platform\")\n ddtrace = self.modules.get(\"ddtrace\")\n tags = [\n (LANG, \"python\"),\n (LANG_INTERPRETER, platform.python_implementation()),\n (LANG_VERSION, platform.python_version()),\n (TRACER_VERSION, ddtrace.__version__),\n ]\n return tags\n", "path": "ddtrace/internal/runtime/tag_collectors.py"}], "after_files": [{"content": "from .collector import ValueCollector\nfrom .constants import (\n SERVICE,\n LANG_INTERPRETER,\n LANG_VERSION,\n LANG,\n TRACER_VERSION,\n)\nfrom ...constants import ENV_KEY\n\n\nclass RuntimeTagCollector(ValueCollector):\n periodic = False\n value = []\n\n\nclass TracerTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the ddtrace Tracer\"\"\"\n\n required_modules = [\"ddtrace\"]\n\n def collect_fn(self, keys):\n ddtrace = self.modules.get(\"ddtrace\")\n # make sure to copy _services to avoid RuntimeError: Set changed size during iteration\n tags = [(SERVICE, service) for service in list(ddtrace.tracer._services)]\n if ENV_KEY in ddtrace.tracer.tags:\n tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))\n return tags\n\n\nclass PlatformTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the Python interpreter implementation.\n\n Tags collected:\n - ``lang_interpreter``:\n\n * For CPython this is 'CPython'.\n * For Pypy this is ``PyPy``\n * For Jython this is ``Jython``\n\n - `lang_version``, eg ``2.7.10``\n - ``lang`` e.g. ``Python``\n - ``tracer_version`` e.g. ``0.29.0``\n\n \"\"\"\n\n required_modules = (\"platform\", \"ddtrace\")\n\n def collect_fn(self, keys):\n platform = self.modules.get(\"platform\")\n ddtrace = self.modules.get(\"ddtrace\")\n tags = [\n (LANG, \"python\"),\n (LANG_INTERPRETER, platform.python_implementation()),\n (LANG_VERSION, platform.python_version()),\n (TRACER_VERSION, ddtrace.__version__),\n ]\n return tags\n", "path": "ddtrace/internal/runtime/tag_collectors.py"}]}
| 1,280 | 165 |
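The traceback in this record comes from iterating `tracer._services` (a set) while another request thread registers a new service, mutating the set mid-iteration. CPython raises the same error even single-threaded whenever a set grows during a loop, which makes the failure easy to demonstrate; the fix in the golden diff simply snapshots the set with `list()` before building the tags.

```python
services = {"web", "worker"}

try:
    for s in services:
        services.add("cron")          # size changes while iterating
except RuntimeError as exc:
    print(exc)                        # Set changed size during iteration

# Snapshot first, as the patched collector does; concurrent adds then only
# affect the original set, not the copy being iterated.
tags = [("service", s) for s in list(services)]
print(tags)
```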
gh_patches_debug_39127
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-4309
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
session_purpose_demo management command no longer needed
### Description
When the session purpose project was in development, we added a management command `session_purpose_demo` to add a fake meeting that exercised the new features. Since the session purposes are now in active use, I think that management command can be pruned.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/meeting/management/commands/session_purpose_demo.py`
Content:
```
1 import datetime
2 import random
3
4 from django.core.management.base import BaseCommand, CommandError
5
6 from ietf.group.models import Group
7 from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory
8 from ietf.meeting.helpers import get_meeting
9 from ietf.meeting.models import Room, Session
10 from ietf.name.models import SessionPurposeName
11
12
13 class Command(BaseCommand):
14 help = 'Set up a demo of the session purpose updates'
15
16 DEMO_PREFIX='PDemo' # used to identify things added by this command
17
18 def add_arguments(self, parser):
19 parser.add_argument('--remove', action='store_true')
20
21 def handle(self, *args, **options):
22 if options['remove']:
23 self.remove_demo()
24 else:
25 self.install_demo()
26
27 def remove_demo(self):
28 self.stdout.write(f'Removing rooms with "{self.DEMO_PREFIX}" name prefix...\n')
29 Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
30 self.stdout.write(f'Removing sessions with "{self.DEMO_PREFIX}" name prefix...\n')
31 Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
32
33 def install_demo(self):
34 # get meeting
35 try:
36 meeting = get_meeting(days=14) # matches how secr app finds meetings
37 except:
38 raise CommandError('No upcoming meeting to modify')
39
40 # create rooms
41 self.stdout.write('Creating rooms...\n')
42 rooms = [
43 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),
44 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),
45 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),
46 ]
47
48 # get all the timeslot types used by a session purpose
49 type_ids = set()
50 for purpose in SessionPurposeName.objects.filter(used=True):
51 type_ids.update(purpose.timeslot_types)
52
53 # set up timeslots
54 self.stdout.write('Creating timeslots...\n')
55 for room in rooms:
56 for day in range(meeting.days):
57 date = meeting.get_meeting_date(day)
58 for n, type_id in enumerate(type_ids):
59 TimeSlotFactory(
60 type_id=type_id,
61 meeting=meeting,
62 location=room,
63 time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),
64 duration=datetime.timedelta(hours=1),
65 )
66
67 # set up sessions
68 self.stdout.write('Creating sessions...')
69 groups_for_session_purpose = {
70 purpose.slug: list(
71 Group.objects.filter(
72 type__features__session_purposes__contains=f'"{purpose.slug}"',
73 state_id='active',
74 )
75 )
76 for purpose in SessionPurposeName.objects.filter(used=True)
77 }
78 for purpose in SessionPurposeName.objects.filter(used=True):
79 for type_id in purpose.timeslot_types:
80 group=random.choice(groups_for_session_purpose[purpose.slug])
81 SessionFactory(
82 meeting=meeting,
83 purpose=purpose,
84 type_id=type_id,
85 group=group,
86 name=f'{self.DEMO_PREFIX} for {group.acronym}',
87 status_id='schedw',
88 add_to_schedule=False,
89 )
90
91 self.stdout.write(f'\nRooms and sessions created with "{self.DEMO_PREFIX}" as name prefix\n')
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/meeting/management/commands/session_purpose_demo.py b/ietf/meeting/management/commands/session_purpose_demo.py
deleted file mode 100644
--- a/ietf/meeting/management/commands/session_purpose_demo.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import datetime
-import random
-
-from django.core.management.base import BaseCommand, CommandError
-
-from ietf.group.models import Group
-from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory
-from ietf.meeting.helpers import get_meeting
-from ietf.meeting.models import Room, Session
-from ietf.name.models import SessionPurposeName
-
-
-class Command(BaseCommand):
- help = 'Set up a demo of the session purpose updates'
-
- DEMO_PREFIX='PDemo' # used to identify things added by this command
-
- def add_arguments(self, parser):
- parser.add_argument('--remove', action='store_true')
-
- def handle(self, *args, **options):
- if options['remove']:
- self.remove_demo()
- else:
- self.install_demo()
-
- def remove_demo(self):
- self.stdout.write(f'Removing rooms with "{self.DEMO_PREFIX}" name prefix...\n')
- Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
- self.stdout.write(f'Removing sessions with "{self.DEMO_PREFIX}" name prefix...\n')
- Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
-
- def install_demo(self):
- # get meeting
- try:
- meeting = get_meeting(days=14) # matches how secr app finds meetings
- except:
- raise CommandError('No upcoming meeting to modify')
-
- # create rooms
- self.stdout.write('Creating rooms...\n')
- rooms = [
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),
- ]
-
- # get all the timeslot types used by a session purpose
- type_ids = set()
- for purpose in SessionPurposeName.objects.filter(used=True):
- type_ids.update(purpose.timeslot_types)
-
- # set up timeslots
- self.stdout.write('Creating timeslots...\n')
- for room in rooms:
- for day in range(meeting.days):
- date = meeting.get_meeting_date(day)
- for n, type_id in enumerate(type_ids):
- TimeSlotFactory(
- type_id=type_id,
- meeting=meeting,
- location=room,
- time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),
- duration=datetime.timedelta(hours=1),
- )
-
- # set up sessions
- self.stdout.write('Creating sessions...')
- groups_for_session_purpose = {
- purpose.slug: list(
- Group.objects.filter(
- type__features__session_purposes__contains=f'"{purpose.slug}"',
- state_id='active',
- )
- )
- for purpose in SessionPurposeName.objects.filter(used=True)
- }
- for purpose in SessionPurposeName.objects.filter(used=True):
- for type_id in purpose.timeslot_types:
- group=random.choice(groups_for_session_purpose[purpose.slug])
- SessionFactory(
- meeting=meeting,
- purpose=purpose,
- type_id=type_id,
- group=group,
- name=f'{self.DEMO_PREFIX} for {group.acronym}',
- status_id='schedw',
- add_to_schedule=False,
- )
-
- self.stdout.write(f'\nRooms and sessions created with "{self.DEMO_PREFIX}" as name prefix\n')
\ No newline at end of file
|
{"golden_diff": "diff --git a/ietf/meeting/management/commands/session_purpose_demo.py b/ietf/meeting/management/commands/session_purpose_demo.py\ndeleted file mode 100644\n--- a/ietf/meeting/management/commands/session_purpose_demo.py\n+++ /dev/null\n@@ -1,91 +0,0 @@\n-import datetime\n-import random\n-\n-from django.core.management.base import BaseCommand, CommandError\n-\n-from ietf.group.models import Group\n-from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory\n-from ietf.meeting.helpers import get_meeting\n-from ietf.meeting.models import Room, Session\n-from ietf.name.models import SessionPurposeName\n-\n-\n-class Command(BaseCommand):\n- help = 'Set up a demo of the session purpose updates'\n-\n- DEMO_PREFIX='PDemo' # used to identify things added by this command\n-\n- def add_arguments(self, parser):\n- parser.add_argument('--remove', action='store_true')\n-\n- def handle(self, *args, **options):\n- if options['remove']:\n- self.remove_demo()\n- else:\n- self.install_demo()\n-\n- def remove_demo(self):\n- self.stdout.write(f'Removing rooms with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n- Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n- self.stdout.write(f'Removing sessions with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n- Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n-\n- def install_demo(self):\n- # get meeting\n- try:\n- meeting = get_meeting(days=14) # matches how secr app finds meetings\n- except:\n- raise CommandError('No upcoming meeting to modify')\n-\n- # create rooms\n- self.stdout.write('Creating rooms...\\n')\n- rooms = [\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),\n- ]\n-\n- # get all the timeslot types used by a session purpose\n- type_ids = set()\n- for purpose in SessionPurposeName.objects.filter(used=True):\n- type_ids.update(purpose.timeslot_types)\n-\n- # set up timeslots\n- self.stdout.write('Creating timeslots...\\n')\n- for room in rooms:\n- for day in range(meeting.days):\n- date = meeting.get_meeting_date(day)\n- for n, type_id in enumerate(type_ids):\n- TimeSlotFactory(\n- type_id=type_id,\n- meeting=meeting,\n- location=room,\n- time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),\n- duration=datetime.timedelta(hours=1),\n- )\n-\n- # set up sessions\n- self.stdout.write('Creating sessions...')\n- groups_for_session_purpose = {\n- purpose.slug: list(\n- Group.objects.filter(\n- type__features__session_purposes__contains=f'\"{purpose.slug}\"',\n- state_id='active',\n- )\n- )\n- for purpose in SessionPurposeName.objects.filter(used=True)\n- }\n- for purpose in SessionPurposeName.objects.filter(used=True):\n- for type_id in purpose.timeslot_types:\n- group=random.choice(groups_for_session_purpose[purpose.slug])\n- SessionFactory(\n- meeting=meeting,\n- purpose=purpose,\n- type_id=type_id,\n- group=group,\n- name=f'{self.DEMO_PREFIX} for {group.acronym}',\n- status_id='schedw',\n- add_to_schedule=False,\n- )\n-\n- self.stdout.write(f'\\nRooms and sessions created with \"{self.DEMO_PREFIX}\" as name prefix\\n')\n\\ No newline at end of file\n", "issue": "session_purpose_demo management command no longer needed\n### Description\n\nWhen the session purpose project was in development, we added a management command `session_purpose_demo` to add a fake meeting that exercised the new features. 
Since the session purposes are now in active use, I think that management command can be pruned.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "import datetime\nimport random\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ietf.group.models import Group\nfrom ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory\nfrom ietf.meeting.helpers import get_meeting\nfrom ietf.meeting.models import Room, Session\nfrom ietf.name.models import SessionPurposeName\n\n\nclass Command(BaseCommand):\n help = 'Set up a demo of the session purpose updates'\n\n DEMO_PREFIX='PDemo' # used to identify things added by this command\n\n def add_arguments(self, parser):\n parser.add_argument('--remove', action='store_true')\n\n def handle(self, *args, **options):\n if options['remove']:\n self.remove_demo()\n else:\n self.install_demo()\n\n def remove_demo(self):\n self.stdout.write(f'Removing rooms with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n self.stdout.write(f'Removing sessions with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n\n def install_demo(self):\n # get meeting\n try:\n meeting = get_meeting(days=14) # matches how secr app finds meetings\n except:\n raise CommandError('No upcoming meeting to modify')\n\n # create rooms\n self.stdout.write('Creating rooms...\\n')\n rooms = [\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),\n ]\n\n # get all the timeslot types used by a session purpose\n type_ids = set()\n for purpose in SessionPurposeName.objects.filter(used=True):\n type_ids.update(purpose.timeslot_types)\n\n # set up timeslots\n self.stdout.write('Creating timeslots...\\n')\n for room in rooms:\n for day in range(meeting.days):\n date = meeting.get_meeting_date(day)\n for n, type_id in enumerate(type_ids):\n TimeSlotFactory(\n type_id=type_id,\n meeting=meeting,\n location=room,\n time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),\n duration=datetime.timedelta(hours=1),\n )\n\n # set up sessions\n self.stdout.write('Creating sessions...')\n groups_for_session_purpose = {\n purpose.slug: list(\n Group.objects.filter(\n type__features__session_purposes__contains=f'\"{purpose.slug}\"',\n state_id='active',\n )\n )\n for purpose in SessionPurposeName.objects.filter(used=True)\n }\n for purpose in SessionPurposeName.objects.filter(used=True):\n for type_id in purpose.timeslot_types:\n group=random.choice(groups_for_session_purpose[purpose.slug])\n SessionFactory(\n meeting=meeting,\n purpose=purpose,\n type_id=type_id,\n group=group,\n name=f'{self.DEMO_PREFIX} for {group.acronym}',\n status_id='schedw',\n add_to_schedule=False,\n )\n\n self.stdout.write(f'\\nRooms and sessions created with \"{self.DEMO_PREFIX}\" as name prefix\\n')", "path": "ietf/meeting/management/commands/session_purpose_demo.py"}], "after_files": [{"content": null, "path": "ietf/meeting/management/commands/session_purpose_demo.py"}]}
| 1,280 | 889 |
gh_patches_debug_29684
|
rasdani/github-patches
|
git_diff
|
secondmind-labs__trieste-194
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pareto set: hypervolume
As a user, I want functionality available to calculate the hypervolume of a Pareto front given the cell bounds, so that I can easily find the hypervolume when defining multi-objective acquisition functionality
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `trieste/utils/pareto.py`
Content:
```
1 # Copyright 2020 The Trieste Contributors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ This module contains functions and classes for Pareto based multi-objective optimization. """
15
16 from typing import Tuple
17
18 import tensorflow as tf
19 from typing_extensions import Final
20
21 from ..type import TensorType
22
23
24 def non_dominated(observations: TensorType) -> Tuple[TensorType, TensorType]:
25 """
26 Computes the non-dominated set for a set of data points.
27 if there are duplicate point(s) in the non-dominated set, this function will return
28 as it is without removing the duplicate.
29
30 :param observations: set of points with shape [N,D]
31 :return: tf.Tensor of the non-dominated set [P,D] and the degree of dominance [N],
32 P is the number of points in pareto front
33 dominances gives the number of dominating points for each data point
34
35
36 """
37 extended = tf.tile(observations[None], [len(observations), 1, 1])
38 swapped_ext = tf.transpose(extended, [1, 0, 2])
39 dominance = tf.math.count_nonzero(
40 tf.logical_and(
41 tf.reduce_all(extended <= swapped_ext, axis=2),
42 tf.reduce_any(extended < swapped_ext, axis=2),
43 ),
44 axis=1,
45 )
46
47 return tf.boolean_mask(observations, dominance == 0), dominance
48
49
50 class BoundedVolumes:
51 """
52 A :class:`BoundedVolumes` store the index of the Pareto front to form lower and upper
53 bounds of the pseudo cells decomposition.
54 """
55
56 def __init__(self, lower_idx: tf.Tensor, upper_idx: tf.Tensor):
57 """
58 Construct bounded volumes.
59
60 :param lower_idx: the lowerbounds index of the volumes
61 :param upper_idx: the upperbounds index of the volumes
62 """
63
64 tf.debugging.assert_shapes([(lower_idx, ["N", "D"]), (upper_idx, ["N", "D"])])
65 self.lower_idx: Final[TensorType] = lower_idx
66 self.upper_idx: Final[TensorType] = upper_idx
67
68
69 class Pareto:
70 """
71 A :class:`Pareto` Construct a Pareto set.
72 Stores a Pareto set and calculates the cell bounds covering the non-dominated region.
73 The latter is needed for certain multiobjective acquisition functions.
74 """
75
76 def __init__(self, observations: TensorType):
77 """
78 :param observations: The observations for all objectives, with shape [N, 2].
79 :raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.
80 """
81 tf.debugging.assert_shapes([(observations, [None, 2])])
82
83 pf, _ = non_dominated(observations)
84 self.front: Final[TensorType] = tf.gather_nd(pf, tf.argsort(pf[:, :1], axis=0))
85 self.bounds: Final[BoundedVolumes] = self._bounds_2d(self.front)
86
87 @staticmethod
88 def _bounds_2d(front: TensorType) -> BoundedVolumes:
89
90 # this assumes the Pareto set has been sorted in ascending order on the first
91 # objective, which implies the second objective is sorted in descending order
92 len_front, number_of_objectives = front.shape
93
94 pf_ext_idx = tf.concat(
95 [
96 tf.zeros([1, number_of_objectives], dtype=tf.int32),
97 tf.argsort(front, axis=0) + 1,
98 tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,
99 ],
100 axis=0,
101 )
102
103 range_ = tf.range(len_front + 1)[:, None]
104 lower = tf.concat([range_, tf.zeros_like(range_)], axis=-1)
105 upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)
106
107 return BoundedVolumes(lower, upper)
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/trieste/utils/pareto.py b/trieste/utils/pareto.py
--- a/trieste/utils/pareto.py
+++ b/trieste/utils/pareto.py
@@ -105,3 +105,46 @@
upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)
return BoundedVolumes(lower, upper)
+
+ def hypervolume_indicator(self, reference: TensorType) -> TensorType:
+ """
+ Calculate the hypervolume indicator
+ The hypervolume indicator is the volume of the dominated region.
+
+ :param reference: a reference point to use, with shape [D].
+ Defines the upper bound of the hypervolume.
+ Should be equal or bigger than the anti-ideal point of the Pareto set.
+ For comparing results across runs, the same reference point must be used.
+ :return: hypervolume indicator
+ :raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
+ shape.
+ :raise `tf.errors.InvalidArgumentError`: If ``reference`` is less than the anti-ideal point
+ in any dimension.
+ """
+ tf.debugging.assert_greater_equal(reference, self.front)
+
+ tf.debugging.assert_shapes(
+ [
+ (self.bounds.lower_idx, ["N", "D"]),
+ (self.bounds.upper_idx, ["N", "D"]),
+ (self.front, ["M", "D"]),
+ (reference, ["D"]),
+ ]
+ )
+
+ min_pfront = tf.reduce_min(self.front, 0, keepdims=True)
+ pseudo_pfront = tf.concat((min_pfront, self.front, reference[None]), 0)
+ N, D = tf.shape(self.bounds.upper_idx)
+
+ idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])
+ upper_idx = tf.reshape(
+ tf.stack([tf.transpose(self.bounds.upper_idx), idx], axis=2), [N * D, 2]
+ )
+ lower_idx = tf.reshape(
+ tf.stack([tf.transpose(self.bounds.lower_idx), idx], axis=2), [N * D, 2]
+ )
+ upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [D, N])
+ lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [D, N])
+ hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 0))
+
+ return tf.reduce_prod(reference[None] - min_pfront) - hypervolume
|
{"golden_diff": "diff --git a/trieste/utils/pareto.py b/trieste/utils/pareto.py\n--- a/trieste/utils/pareto.py\n+++ b/trieste/utils/pareto.py\n@@ -105,3 +105,46 @@\n upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)\n \n return BoundedVolumes(lower, upper)\n+\n+ def hypervolume_indicator(self, reference: TensorType) -> TensorType:\n+ \"\"\"\n+ Calculate the hypervolume indicator\n+ The hypervolume indicator is the volume of the dominated region.\n+\n+ :param reference: a reference point to use, with shape [D].\n+ Defines the upper bound of the hypervolume.\n+ Should be equal or bigger than the anti-ideal point of the Pareto set.\n+ For comparing results across runs, the same reference point must be used.\n+ :return: hypervolume indicator\n+ :raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid\n+ shape.\n+ :raise `tf.errors.InvalidArgumentError`: If ``reference`` is less than the anti-ideal point\n+ in any dimension.\n+ \"\"\"\n+ tf.debugging.assert_greater_equal(reference, self.front)\n+\n+ tf.debugging.assert_shapes(\n+ [\n+ (self.bounds.lower_idx, [\"N\", \"D\"]),\n+ (self.bounds.upper_idx, [\"N\", \"D\"]),\n+ (self.front, [\"M\", \"D\"]),\n+ (reference, [\"D\"]),\n+ ]\n+ )\n+\n+ min_pfront = tf.reduce_min(self.front, 0, keepdims=True)\n+ pseudo_pfront = tf.concat((min_pfront, self.front, reference[None]), 0)\n+ N, D = tf.shape(self.bounds.upper_idx)\n+\n+ idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])\n+ upper_idx = tf.reshape(\n+ tf.stack([tf.transpose(self.bounds.upper_idx), idx], axis=2), [N * D, 2]\n+ )\n+ lower_idx = tf.reshape(\n+ tf.stack([tf.transpose(self.bounds.lower_idx), idx], axis=2), [N * D, 2]\n+ )\n+ upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [D, N])\n+ lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [D, N])\n+ hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 0))\n+\n+ return tf.reduce_prod(reference[None] - min_pfront) - hypervolume\n", "issue": "Pareto set: hypervolume\nAs a user, I want functionality available to calculate the hypervolume of a Pareto front given the cell bounds, so that I can easily find the hypervolume when defining multi-objective acquisition functionality\n", "before_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This module contains functions and classes for Pareto based multi-objective optimization. 
\"\"\"\n\nfrom typing import Tuple\n\nimport tensorflow as tf\nfrom typing_extensions import Final\n\nfrom ..type import TensorType\n\n\ndef non_dominated(observations: TensorType) -> Tuple[TensorType, TensorType]:\n \"\"\"\n Computes the non-dominated set for a set of data points.\n if there are duplicate point(s) in the non-dominated set, this function will return\n as it is without removing the duplicate.\n\n :param observations: set of points with shape [N,D]\n :return: tf.Tensor of the non-dominated set [P,D] and the degree of dominance [N],\n P is the number of points in pareto front\n dominances gives the number of dominating points for each data point\n\n\n \"\"\"\n extended = tf.tile(observations[None], [len(observations), 1, 1])\n swapped_ext = tf.transpose(extended, [1, 0, 2])\n dominance = tf.math.count_nonzero(\n tf.logical_and(\n tf.reduce_all(extended <= swapped_ext, axis=2),\n tf.reduce_any(extended < swapped_ext, axis=2),\n ),\n axis=1,\n )\n\n return tf.boolean_mask(observations, dominance == 0), dominance\n\n\nclass BoundedVolumes:\n \"\"\"\n A :class:`BoundedVolumes` store the index of the Pareto front to form lower and upper\n bounds of the pseudo cells decomposition.\n \"\"\"\n\n def __init__(self, lower_idx: tf.Tensor, upper_idx: tf.Tensor):\n \"\"\"\n Construct bounded volumes.\n\n :param lower_idx: the lowerbounds index of the volumes\n :param upper_idx: the upperbounds index of the volumes\n \"\"\"\n\n tf.debugging.assert_shapes([(lower_idx, [\"N\", \"D\"]), (upper_idx, [\"N\", \"D\"])])\n self.lower_idx: Final[TensorType] = lower_idx\n self.upper_idx: Final[TensorType] = upper_idx\n\n\nclass Pareto:\n \"\"\"\n A :class:`Pareto` Construct a Pareto set.\n Stores a Pareto set and calculates the cell bounds covering the non-dominated region.\n The latter is needed for certain multiobjective acquisition functions.\n \"\"\"\n\n def __init__(self, observations: TensorType):\n \"\"\"\n :param observations: The observations for all objectives, with shape [N, 2].\n :raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.\n \"\"\"\n tf.debugging.assert_shapes([(observations, [None, 2])])\n\n pf, _ = non_dominated(observations)\n self.front: Final[TensorType] = tf.gather_nd(pf, tf.argsort(pf[:, :1], axis=0))\n self.bounds: Final[BoundedVolumes] = self._bounds_2d(self.front)\n\n @staticmethod\n def _bounds_2d(front: TensorType) -> BoundedVolumes:\n\n # this assumes the Pareto set has been sorted in ascending order on the first\n # objective, which implies the second objective is sorted in descending order\n len_front, number_of_objectives = front.shape\n\n pf_ext_idx = tf.concat(\n [\n tf.zeros([1, number_of_objectives], dtype=tf.int32),\n tf.argsort(front, axis=0) + 1,\n tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,\n ],\n axis=0,\n )\n\n range_ = tf.range(len_front + 1)[:, None]\n lower = tf.concat([range_, tf.zeros_like(range_)], axis=-1)\n upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)\n\n return BoundedVolumes(lower, upper)\n", "path": "trieste/utils/pareto.py"}], "after_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on 
an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This module contains functions and classes for Pareto based multi-objective optimization. \"\"\"\n\nfrom typing import Tuple\n\nimport tensorflow as tf\nfrom typing_extensions import Final\n\nfrom ..type import TensorType\n\n\ndef non_dominated(observations: TensorType) -> Tuple[TensorType, TensorType]:\n \"\"\"\n Computes the non-dominated set for a set of data points.\n if there are duplicate point(s) in the non-dominated set, this function will return\n as it is without removing the duplicate.\n\n :param observations: set of points with shape [N,D]\n :return: tf.Tensor of the non-dominated set [P,D] and the degree of dominance [N],\n P is the number of points in pareto front\n dominances gives the number of dominating points for each data point\n\n\n \"\"\"\n extended = tf.tile(observations[None], [len(observations), 1, 1])\n swapped_ext = tf.transpose(extended, [1, 0, 2])\n dominance = tf.math.count_nonzero(\n tf.logical_and(\n tf.reduce_all(extended <= swapped_ext, axis=2),\n tf.reduce_any(extended < swapped_ext, axis=2),\n ),\n axis=1,\n )\n\n return tf.boolean_mask(observations, dominance == 0), dominance\n\n\nclass BoundedVolumes:\n \"\"\"\n A :class:`BoundedVolumes` store the index of the Pareto front to form lower and upper\n bounds of the pseudo cells decomposition.\n \"\"\"\n\n def __init__(self, lower_idx: tf.Tensor, upper_idx: tf.Tensor):\n \"\"\"\n Construct bounded volumes.\n\n :param lower_idx: the lowerbounds index of the volumes\n :param upper_idx: the upperbounds index of the volumes\n \"\"\"\n\n tf.debugging.assert_shapes([(lower_idx, [\"N\", \"D\"]), (upper_idx, [\"N\", \"D\"])])\n self.lower_idx: Final[TensorType] = lower_idx\n self.upper_idx: Final[TensorType] = upper_idx\n\n\nclass Pareto:\n \"\"\"\n A :class:`Pareto` Construct a Pareto set.\n Stores a Pareto set and calculates the cell bounds covering the non-dominated region.\n The latter is needed for certain multiobjective acquisition functions.\n \"\"\"\n\n def __init__(self, observations: TensorType):\n \"\"\"\n :param observations: The observations for all objectives, with shape [N, 2].\n :raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.\n \"\"\"\n tf.debugging.assert_shapes([(observations, [None, 2])])\n\n pf, _ = non_dominated(observations)\n self.front: Final[TensorType] = tf.gather_nd(pf, tf.argsort(pf[:, :1], axis=0))\n self.bounds: Final[BoundedVolumes] = self._bounds_2d(self.front)\n\n @staticmethod\n def _bounds_2d(front: TensorType) -> BoundedVolumes:\n\n # this assumes the Pareto set has been sorted in ascending order on the first\n # objective, which implies the second objective is sorted in descending order\n len_front, number_of_objectives = front.shape\n\n pf_ext_idx = tf.concat(\n [\n tf.zeros([1, number_of_objectives], dtype=tf.int32),\n tf.argsort(front, axis=0) + 1,\n tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,\n ],\n axis=0,\n )\n\n range_ = tf.range(len_front + 1)[:, None]\n lower = tf.concat([range_, tf.zeros_like(range_)], axis=-1)\n upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)\n\n return BoundedVolumes(lower, upper)\n\n def hypervolume_indicator(self, reference: TensorType) -> TensorType:\n \"\"\"\n Calculate the hypervolume indicator\n The hypervolume indicator is the volume of the 
dominated region.\n\n :param reference: a reference point to use, with shape [D].\n Defines the upper bound of the hypervolume.\n Should be equal or bigger than the anti-ideal point of the Pareto set.\n For comparing results across runs, the same reference point must be used.\n :return: hypervolume indicator\n :raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid\n shape.\n :raise `tf.errors.InvalidArgumentError`: If ``reference`` is less than the anti-ideal point\n in any dimension.\n \"\"\"\n tf.debugging.assert_greater_equal(reference, self.front)\n\n tf.debugging.assert_shapes(\n [\n (self.bounds.lower_idx, [\"N\", \"D\"]),\n (self.bounds.upper_idx, [\"N\", \"D\"]),\n (self.front, [\"M\", \"D\"]),\n (reference, [\"D\"]),\n ]\n )\n\n min_pfront = tf.reduce_min(self.front, 0, keepdims=True)\n pseudo_pfront = tf.concat((min_pfront, self.front, reference[None]), 0)\n N, D = tf.shape(self.bounds.upper_idx)\n\n idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])\n upper_idx = tf.reshape(\n tf.stack([tf.transpose(self.bounds.upper_idx), idx], axis=2), [N * D, 2]\n )\n lower_idx = tf.reshape(\n tf.stack([tf.transpose(self.bounds.lower_idx), idx], axis=2), [N * D, 2]\n )\n upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [D, N])\n lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [D, N])\n hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 0))\n\n return tf.reduce_prod(reference[None] - min_pfront) - hypervolume\n", "path": "trieste/utils/pareto.py"}]}
| 1,503 | 604 |
gh_patches_debug_20014
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-1141
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unpin onnx version to allow using v1.14
### Feature request
Remove the version constraint on onnx package to allow using onnx==1.14.
### Motivation
- The latest version of onnxruntime (v1.15 at the time of writing) supports onnx==1.14.
- onnx==1.14 introduces support for protobuf v4, which is also useful.
### Your contribution
Seems removing the pin from setup.py would be all that is needed as long as the existing tests provide enough validation.
--- END ISSUE ---
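A quick way to sanity-check the compatibility claim in a local environment is sketched below; it only prints the resolved versions of the three distributions mentioned in the issue and assumes they are installed.
```python
# Illustrative check only: prints installed versions of the packages the issue
# discusses. Requires Python 3.8+ for importlib.metadata.
from importlib.metadata import PackageNotFoundError, version

for dist in ("onnx", "onnxruntime", "protobuf"):
    try:
        print(f"{dist}: {version(dist)}")
    except PackageNotFoundError:
        print(f"{dist}: not installed")
```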
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2
3 from setuptools import find_namespace_packages, setup
4
5
6 # Ensure we match the version set in src/optimum/version.py
7 try:
8 filepath = "optimum/version.py"
9 with open(filepath) as version_file:
10 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
11 except Exception as error:
12 assert False, "Error: Could not open '%s' due %s\n" % (filepath, error)
13
14
15 REQUIRED_PKGS = [
16 "coloredlogs",
17 "sympy",
18 "transformers[sentencepiece]>=4.26.0",
19 "torch>=1.9",
20 "packaging",
21 "numpy",
22 "huggingface_hub>=0.8.0",
23 "datasets",
24 ]
25
26 TESTS_REQUIRE = [
27 "pytest",
28 "requests",
29 "parameterized",
30 "pytest-xdist",
31 "Pillow",
32 "sacremoses",
33 "torchvision",
34 "diffusers>=0.17.0",
35 "torchaudio",
36 ]
37
38 QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241,<=0.0.259"]
39
40 BENCHMARK_REQUIRE = ["optuna", "tqdm", "scikit-learn", "seqeval", "torchvision", "evaluate>=0.2.0"]
41
42 EXTRAS_REQUIRE = {
43 "onnxruntime": [
44 "onnx<1.14.0",
45 "onnxruntime>=1.9.0",
46 "datasets>=1.2.1",
47 "evaluate",
48 "protobuf>=3.20.1",
49 ],
50 "onnxruntime-gpu": [
51 "onnx<1.14.0",
52 "onnxruntime-gpu>=1.9.0",
53 "datasets>=1.2.1",
54 "evaluate",
55 "protobuf>=3.20.1",
56 ],
57 "exporters": ["onnx<1.14.0", "onnxruntime", "timm"],
58 "exporters-gpu": ["onnx<1.14.0", "onnxruntime-gpu", "timm"],
59 "exporters-tf": ["tensorflow>=2.4,<2.11", "tf2onnx", "onnx", "onnxruntime", "timm", "h5py", "numpy<1.24.0"],
60 "intel": "optimum-intel",
61 "openvino": "optimum-intel[openvino]",
62 "nncf": "optimum-intel[nncf]",
63 "neural-compressor": "optimum-intel[neural-compressor]",
64 "graphcore": "optimum-graphcore",
65 "habana": ["transformers<4.29.0", "optimum-habana"],
66 "neuron": "optimum-neuron[neuron]",
67 "neuronx": "optimum-neuron[neuronx]",
68 "dev": TESTS_REQUIRE + QUALITY_REQUIRE,
69 "tests": TESTS_REQUIRE,
70 "quality": QUALITY_REQUIRE,
71 "benchmark": BENCHMARK_REQUIRE,
72 }
73
74 setup(
75 name="optimum",
76 version=__version__,
77 description="Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to "
78 "integrate third-party libraries from Hardware Partners and interface with their specific "
79 "functionality.",
80 long_description=open("README.md", "r", encoding="utf-8").read(),
81 long_description_content_type="text/markdown",
82 classifiers=[
83 "Development Status :: 5 - Production/Stable",
84 "License :: OSI Approved :: Apache Software License",
85 "Intended Audience :: Developers",
86 "Intended Audience :: Education",
87 "Intended Audience :: Science/Research",
88 "Operating System :: OS Independent",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 "Programming Language :: Python :: 3.9",
92 "Topic :: Scientific/Engineering :: Artificial Intelligence",
93 ],
94 keywords="transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, "
95 "habana, graphcore, neural compressor, ipu, hpu",
96 url="https://github.com/huggingface/optimum",
97 author="HuggingFace Inc. Special Ops Team",
98 author_email="[email protected]",
99 license="Apache",
100 packages=find_namespace_packages(include=["optimum*"]),
101 install_requires=REQUIRED_PKGS,
102 extras_require=EXTRAS_REQUIRE,
103 python_requires=">=3.7.0",
104 include_package_data=True,
105 zip_safe=False,
106 entry_points={"console_scripts": ["optimum-cli=optimum.commands.optimum_cli:main"]},
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,21 +41,21 @@
EXTRAS_REQUIRE = {
"onnxruntime": [
- "onnx<1.14.0",
+ "onnx",
"onnxruntime>=1.9.0",
"datasets>=1.2.1",
"evaluate",
"protobuf>=3.20.1",
],
"onnxruntime-gpu": [
- "onnx<1.14.0",
+ "onnx",
"onnxruntime-gpu>=1.9.0",
"datasets>=1.2.1",
"evaluate",
"protobuf>=3.20.1",
],
- "exporters": ["onnx<1.14.0", "onnxruntime", "timm"],
- "exporters-gpu": ["onnx<1.14.0", "onnxruntime-gpu", "timm"],
+ "exporters": ["onnx", "onnxruntime", "timm"],
+ "exporters-gpu": ["onnx", "onnxruntime-gpu", "timm"],
"exporters-tf": ["tensorflow>=2.4,<2.11", "tf2onnx", "onnx", "onnxruntime", "timm", "h5py", "numpy<1.24.0"],
"intel": "optimum-intel",
"openvino": "optimum-intel[openvino]",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,21 +41,21 @@\n \n EXTRAS_REQUIRE = {\n \"onnxruntime\": [\n- \"onnx<1.14.0\",\n+ \"onnx\",\n \"onnxruntime>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"onnxruntime-gpu\": [\n- \"onnx<1.14.0\",\n+ \"onnx\",\n \"onnxruntime-gpu>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n- \"exporters\": [\"onnx<1.14.0\", \"onnxruntime\", \"timm\"],\n- \"exporters-gpu\": [\"onnx<1.14.0\", \"onnxruntime-gpu\", \"timm\"],\n+ \"exporters\": [\"onnx\", \"onnxruntime\", \"timm\"],\n+ \"exporters-gpu\": [\"onnx\", \"onnxruntime-gpu\", \"timm\"],\n \"exporters-tf\": [\"tensorflow>=2.4,<2.11\", \"tf2onnx\", \"onnx\", \"onnxruntime\", \"timm\", \"h5py\", \"numpy<1.24.0\"],\n \"intel\": \"optimum-intel\",\n \"openvino\": \"optimum-intel[openvino]\",\n", "issue": "Unpin onnx version to allow using v1.14\n### Feature request\r\n\r\nRemove the version constraint on onnx package to allow using onnx==1.14.\r\n\r\n### Motivation\r\n\r\n- The latest version of onnxruntime (v1.15 at the time of writing) supports onnx==1.14.\r\n- onnx==1.14 introduces support for protobuf v4, which is also useful.\r\n\r\n### Your contribution\r\n\r\nSeems removing the pin from setup.py would be all that is needed as long as the existing tests provide enough validation.\n", "before_files": [{"content": "import re\n\nfrom setuptools import find_namespace_packages, setup\n\n\n# Ensure we match the version set in src/optimum/version.py\ntry:\n filepath = \"optimum/version.py\"\n with open(filepath) as version_file:\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\nexcept Exception as error:\n assert False, \"Error: Could not open '%s' due %s\\n\" % (filepath, error)\n\n\nREQUIRED_PKGS = [\n \"coloredlogs\",\n \"sympy\",\n \"transformers[sentencepiece]>=4.26.0\",\n \"torch>=1.9\",\n \"packaging\",\n \"numpy\",\n \"huggingface_hub>=0.8.0\",\n \"datasets\",\n]\n\nTESTS_REQUIRE = [\n \"pytest\",\n \"requests\",\n \"parameterized\",\n \"pytest-xdist\",\n \"Pillow\",\n \"sacremoses\",\n \"torchvision\",\n \"diffusers>=0.17.0\",\n \"torchaudio\",\n]\n\nQUALITY_REQUIRE = [\"black~=23.1\", \"ruff>=0.0.241,<=0.0.259\"]\n\nBENCHMARK_REQUIRE = [\"optuna\", \"tqdm\", \"scikit-learn\", \"seqeval\", \"torchvision\", \"evaluate>=0.2.0\"]\n\nEXTRAS_REQUIRE = {\n \"onnxruntime\": [\n \"onnx<1.14.0\",\n \"onnxruntime>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"onnxruntime-gpu\": [\n \"onnx<1.14.0\",\n \"onnxruntime-gpu>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"exporters\": [\"onnx<1.14.0\", \"onnxruntime\", \"timm\"],\n \"exporters-gpu\": [\"onnx<1.14.0\", \"onnxruntime-gpu\", \"timm\"],\n \"exporters-tf\": [\"tensorflow>=2.4,<2.11\", \"tf2onnx\", \"onnx\", \"onnxruntime\", \"timm\", \"h5py\", \"numpy<1.24.0\"],\n \"intel\": \"optimum-intel\",\n \"openvino\": \"optimum-intel[openvino]\",\n \"nncf\": \"optimum-intel[nncf]\",\n \"neural-compressor\": \"optimum-intel[neural-compressor]\",\n \"graphcore\": \"optimum-graphcore\",\n \"habana\": [\"transformers<4.29.0\", \"optimum-habana\"],\n \"neuron\": \"optimum-neuron[neuron]\",\n \"neuronx\": \"optimum-neuron[neuronx]\",\n \"dev\": TESTS_REQUIRE + QUALITY_REQUIRE,\n \"tests\": TESTS_REQUIRE,\n \"quality\": QUALITY_REQUIRE,\n \"benchmark\": BENCHMARK_REQUIRE,\n}\n\nsetup(\n name=\"optimum\",\n version=__version__,\n description=\"Optimum Library is an 
extension of the Hugging Face Transformers library, providing a framework to \"\n \"integrate third-party libraries from Hardware Partners and interface with their specific \"\n \"functionality.\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, \"\n \"habana, graphcore, neural compressor, ipu, hpu\",\n url=\"https://github.com/huggingface/optimum\",\n author=\"HuggingFace Inc. Special Ops Team\",\n author_email=\"[email protected]\",\n license=\"Apache\",\n packages=find_namespace_packages(include=[\"optimum*\"]),\n install_requires=REQUIRED_PKGS,\n extras_require=EXTRAS_REQUIRE,\n python_requires=\">=3.7.0\",\n include_package_data=True,\n zip_safe=False,\n entry_points={\"console_scripts\": [\"optimum-cli=optimum.commands.optimum_cli:main\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\n\nfrom setuptools import find_namespace_packages, setup\n\n\n# Ensure we match the version set in src/optimum/version.py\ntry:\n filepath = \"optimum/version.py\"\n with open(filepath) as version_file:\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\nexcept Exception as error:\n assert False, \"Error: Could not open '%s' due %s\\n\" % (filepath, error)\n\n\nREQUIRED_PKGS = [\n \"coloredlogs\",\n \"sympy\",\n \"transformers[sentencepiece]>=4.26.0\",\n \"torch>=1.9\",\n \"packaging\",\n \"numpy\",\n \"huggingface_hub>=0.8.0\",\n \"datasets\",\n]\n\nTESTS_REQUIRE = [\n \"pytest\",\n \"requests\",\n \"parameterized\",\n \"pytest-xdist\",\n \"Pillow\",\n \"sacremoses\",\n \"torchvision\",\n \"diffusers>=0.17.0\",\n \"torchaudio\",\n]\n\nQUALITY_REQUIRE = [\"black~=23.1\", \"ruff>=0.0.241,<=0.0.259\"]\n\nBENCHMARK_REQUIRE = [\"optuna\", \"tqdm\", \"scikit-learn\", \"seqeval\", \"torchvision\", \"evaluate>=0.2.0\"]\n\nEXTRAS_REQUIRE = {\n \"onnxruntime\": [\n \"onnx\",\n \"onnxruntime>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"onnxruntime-gpu\": [\n \"onnx\",\n \"onnxruntime-gpu>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"exporters\": [\"onnx\", \"onnxruntime\", \"timm\"],\n \"exporters-gpu\": [\"onnx\", \"onnxruntime-gpu\", \"timm\"],\n \"exporters-tf\": [\"tensorflow>=2.4,<2.11\", \"tf2onnx\", \"onnx\", \"onnxruntime\", \"timm\", \"h5py\", \"numpy<1.24.0\"],\n \"intel\": \"optimum-intel\",\n \"openvino\": \"optimum-intel[openvino]\",\n \"nncf\": \"optimum-intel[nncf]\",\n \"neural-compressor\": \"optimum-intel[neural-compressor]\",\n \"graphcore\": \"optimum-graphcore\",\n \"habana\": [\"transformers<4.29.0\", \"optimum-habana\"],\n \"neuron\": \"optimum-neuron[neuron]\",\n \"neuronx\": \"optimum-neuron[neuronx]\",\n \"dev\": TESTS_REQUIRE + QUALITY_REQUIRE,\n \"tests\": TESTS_REQUIRE,\n \"quality\": QUALITY_REQUIRE,\n \"benchmark\": BENCHMARK_REQUIRE,\n}\n\nsetup(\n name=\"optimum\",\n version=__version__,\n 
description=\"Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to \"\n \"integrate third-party libraries from Hardware Partners and interface with their specific \"\n \"functionality.\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, \"\n \"habana, graphcore, neural compressor, ipu, hpu\",\n url=\"https://github.com/huggingface/optimum\",\n author=\"HuggingFace Inc. Special Ops Team\",\n author_email=\"[email protected]\",\n license=\"Apache\",\n packages=find_namespace_packages(include=[\"optimum*\"]),\n install_requires=REQUIRED_PKGS,\n extras_require=EXTRAS_REQUIRE,\n python_requires=\">=3.7.0\",\n include_package_data=True,\n zip_safe=False,\n entry_points={\"console_scripts\": [\"optimum-cli=optimum.commands.optimum_cli:main\"]},\n)\n", "path": "setup.py"}]}
| 1,635 | 352 |
gh_patches_debug_28777
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-922
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add UUID objects support for uuid()
- [x] Add parameter `as_object`
--- END ISSUE ---
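A rough sketch of the requested call signature (the parameter name is taken from the checklist above; the eventual implementation may differ):
```python
# Hypothetical usage of the requested `as_object` flag; assumes a mimesis
# build that already ships the feature.
from uuid import UUID

from mimesis import Cryptographic

crypto = Cryptographic()

as_string = crypto.uuid()                # existing behaviour: plain str
as_object = crypto.uuid(as_object=True)  # requested: a uuid.UUID instance

assert isinstance(as_string, str)
assert isinstance(as_object, UUID)
```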
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Mimesis is a Python library, which helps generate fake data.
4
5 Copyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)
6 Repository: https://github.com/lk-geimfari/mimesis
7 Email: <[email protected]>
8 """
9
10 from mimesis.providers import (
11 Address,
12 BaseDataProvider,
13 BaseProvider,
14 Business,
15 Choice,
16 Clothing,
17 Code,
18 Cryptographic,
19 Datetime,
20 Development,
21 File,
22 Food,
23 Generic,
24 Hardware,
25 Internet,
26 Numbers,
27 Path,
28 Payment,
29 Person,
30 Science,
31 Structure,
32 Text,
33 Transport,
34 UnitSystem,
35 )
36
37 __all__ = [
38 'Address',
39 'BaseDataProvider',
40 'BaseProvider',
41 'Business',
42 'Clothing',
43 'Code',
44 'Choice',
45 'Datetime',
46 'Development',
47 'File',
48 'Food',
49 'Hardware',
50 'Internet',
51 'Numbers',
52 'Path',
53 'Payment',
54 'Person',
55 'Science',
56 'Structure',
57 'Text',
58 'Transport',
59 'UnitSystem',
60 'Cryptographic',
61
62 # Has all:
63 'Generic',
64
65 # Meta:
66 '__version__',
67 '__title__',
68 '__description__',
69 '__url__',
70 '__author__',
71 '__author_email__',
72 '__license__',
73 ]
74
75 __version__ = '4.0.0'
76 __title__ = 'mimesis'
77 __description__ = 'Mimesis: fake data generator.'
78 __url__ = 'https://github.com/lk-geimfari/mimesis'
79 __author__ = 'Isaak Uchakaev (Likid Geimfari)'
80 __author_email__ = '[email protected]'
81 __license__ = 'MIT License'
82
```
Path: `mimesis/providers/cryptographic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Cryptographic data provider."""
4
5 import hashlib
6 import secrets
7 import uuid
8 from typing import Optional
9
10 from mimesis.enums import Algorithm
11 from mimesis.providers.base import BaseProvider
12 from mimesis.providers.text import Text
13
14 __all__ = ['Cryptographic']
15
16
17 class Cryptographic(BaseProvider):
18 """Class that provides cryptographic data."""
19
20 def __init__(self, *args, **kwargs) -> None:
21 """Initialize attributes.
22
23 :param seed: Seed.
24 """
25 super().__init__(*args, **kwargs)
26 self.__words = Text('en')._data.get('words', {})
27
28 class Meta:
29 """Class for metadata."""
30
31 name = 'cryptographic'
32
33 def uuid(self, version: int = None) -> str:
34 """Generate random UUID.
35
36 :param version: UUID version.
37 :return: UUID
38 """
39 bits = self.random.getrandbits(128)
40 return str(uuid.UUID(int=bits, version=version))
41
42 def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003
43 """Generate random hash.
44
45 To change hashing algorithm, pass parameter ``algorithm``
46 with needed value of the enum object :class:`~mimesis.enums.Algorithm`
47
48 :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.
49 :return: Hash.
50 :raises NonEnumerableError: When algorithm is unsupported.
51 """
52 key = self._validate_enum(algorithm, Algorithm)
53
54 if hasattr(hashlib, key):
55 fn = getattr(hashlib, key)
56 return fn(self.uuid().encode()).hexdigest()
57
58 @staticmethod
59 def token_bytes(entropy: int = 32) -> bytes:
60 """Generate byte string containing ``entropy`` bytes.
61
62 The string has ``entropy`` random bytes, each byte
63 converted to two hex digits.
64
65 .. warning:: Seed is not applicable to this method,
66 because of its cryptographic-safe nature.
67
68 :param entropy: Number of bytes (default: 32).
69 :return: Random bytes.
70 """
71 return secrets.token_bytes(entropy)
72
73 @staticmethod
74 def token_hex(entropy: int = 32) -> str:
75 """Return a random text string, in hexadecimal.
76
77 The string has *entropy* random bytes, each byte converted to two
78 hex digits. If *entropy* is ``None`` or not supplied, a reasonable
79 default is used.
80
81 .. warning:: Seed is not applicable to this method,
82 because of its cryptographic-safe nature.
83
84 :param entropy: Number of bytes (default: 32).
85 :return: Token.
86 """
87 return secrets.token_hex(entropy)
88
89 @staticmethod
90 def token_urlsafe(entropy: int = 32):
91 """Return a random URL-safe text string, in Base64 encoding.
92
93 The string has *entropy* random bytes. If *entropy* is ``None``
94 or not supplied, a reasonable default is used.
95
96 .. warning:: Seed is not applicable to this method,
97 because of its cryptographic-safe nature.
98
99 :param entropy: Number of bytes (default: 32).
100 :return: URL-safe token.
101 """
102 return secrets.token_urlsafe(entropy)
103
104 def mnemonic_phrase(self, length: int = 12,
105 separator: Optional[str] = None) -> str:
106 """Generate pseudo mnemonic phrase.
107
108 Please, keep in mind that this method generates
109 crypto-insecure values.
110
111 :param separator: Separator of phrases (Default is " ").
112 :param length: Number of words.
113 :return: Mnemonic phrase.
114 """
115 if not separator:
116 separator = ' '
117
118 words = self.__words['normal']
119 words_generator = (self.random.choice(words) for _ in range(length))
120 return '{}'.format(separator).join(words_generator)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mimesis/__init__.py b/mimesis/__init__.py
--- a/mimesis/__init__.py
+++ b/mimesis/__init__.py
@@ -72,7 +72,7 @@
'__license__',
]
-__version__ = '4.0.0'
+__version__ = '4.1.0'
__title__ = 'mimesis'
__description__ = 'Mimesis: fake data generator.'
__url__ = 'https://github.com/lk-geimfari/mimesis'
diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py
--- a/mimesis/providers/cryptographic.py
+++ b/mimesis/providers/cryptographic.py
@@ -4,8 +4,8 @@
import hashlib
import secrets
-import uuid
-from typing import Optional
+from typing import Optional, Union
+from uuid import UUID
from mimesis.enums import Algorithm
from mimesis.providers.base import BaseProvider
@@ -30,14 +30,25 @@
name = 'cryptographic'
- def uuid(self, version: int = None) -> str:
+ def uuid(self, version: int = None,
+ as_object: bool = False) -> Union[UUID, str]:
"""Generate random UUID.
+ This method returns string by default,
+ but yoy can make it return uuid.UUID object using
+ parameter **as_object**
+
+ :param as_object: Returns uuid.UUID object instead of string.
:param version: UUID version.
- :return: UUID
+ :return: UUID.
"""
bits = self.random.getrandbits(128)
- return str(uuid.UUID(int=bits, version=version))
+ uuid_obj = UUID(int=bits, version=version)
+
+ if not as_object:
+ return str(uuid_obj)
+
+ return uuid_obj
def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003
"""Generate random hash.
|
{"golden_diff": "diff --git a/mimesis/__init__.py b/mimesis/__init__.py\n--- a/mimesis/__init__.py\n+++ b/mimesis/__init__.py\n@@ -72,7 +72,7 @@\n '__license__',\n ]\n \n-__version__ = '4.0.0'\n+__version__ = '4.1.0'\n __title__ = 'mimesis'\n __description__ = 'Mimesis: fake data generator.'\n __url__ = 'https://github.com/lk-geimfari/mimesis'\ndiff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py\n--- a/mimesis/providers/cryptographic.py\n+++ b/mimesis/providers/cryptographic.py\n@@ -4,8 +4,8 @@\n \n import hashlib\n import secrets\n-import uuid\n-from typing import Optional\n+from typing import Optional, Union\n+from uuid import UUID\n \n from mimesis.enums import Algorithm\n from mimesis.providers.base import BaseProvider\n@@ -30,14 +30,25 @@\n \n name = 'cryptographic'\n \n- def uuid(self, version: int = None) -> str:\n+ def uuid(self, version: int = None,\n+ as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID.\n \n+ This method returns string by default,\n+ but yoy can make it return uuid.UUID object using\n+ parameter **as_object**\n+\n+ :param as_object: Returns uuid.UUID object instead of string.\n :param version: UUID version.\n- :return: UUID\n+ :return: UUID.\n \"\"\"\n bits = self.random.getrandbits(128)\n- return str(uuid.UUID(int=bits, version=version))\n+ uuid_obj = UUID(int=bits, version=version)\n+\n+ if not as_object:\n+ return str(uuid_obj)\n+\n+ return uuid_obj\n \n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n", "issue": "Add UUID objects support for uuid()\n- [x] Add parameter `as_object` \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Mimesis is a Python library, which helps generate fake data.\n\nCopyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)\nRepository: https://github.com/lk-geimfari/mimesis\nEmail: <[email protected]>\n\"\"\"\n\nfrom mimesis.providers import (\n Address,\n BaseDataProvider,\n BaseProvider,\n Business,\n Choice,\n Clothing,\n Code,\n Cryptographic,\n Datetime,\n Development,\n File,\n Food,\n Generic,\n Hardware,\n Internet,\n Numbers,\n Path,\n Payment,\n Person,\n Science,\n Structure,\n Text,\n Transport,\n UnitSystem,\n)\n\n__all__ = [\n 'Address',\n 'BaseDataProvider',\n 'BaseProvider',\n 'Business',\n 'Clothing',\n 'Code',\n 'Choice',\n 'Datetime',\n 'Development',\n 'File',\n 'Food',\n 'Hardware',\n 'Internet',\n 'Numbers',\n 'Path',\n 'Payment',\n 'Person',\n 'Science',\n 'Structure',\n 'Text',\n 'Transport',\n 'UnitSystem',\n 'Cryptographic',\n\n # Has all:\n 'Generic',\n\n # Meta:\n '__version__',\n '__title__',\n '__description__',\n '__url__',\n '__author__',\n '__author_email__',\n '__license__',\n]\n\n__version__ = '4.0.0'\n__title__ = 'mimesis'\n__description__ = 'Mimesis: fake data generator.'\n__url__ = 'https://github.com/lk-geimfari/mimesis'\n__author__ = 'Isaak Uchakaev (Likid Geimfari)'\n__author_email__ = '[email protected]'\n__license__ = 'MIT License'\n", "path": "mimesis/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nimport uuid\nfrom typing import Optional\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: 
Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n def uuid(self, version: int = None) -> str:\n \"\"\"Generate random UUID.\n\n :param version: UUID version.\n :return: UUID\n \"\"\"\n bits = self.random.getrandbits(128)\n return str(uuid.UUID(int=bits, version=version))\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. 
warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Mimesis is a Python library, which helps generate fake data.\n\nCopyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)\nRepository: https://github.com/lk-geimfari/mimesis\nEmail: <[email protected]>\n\"\"\"\n\nfrom mimesis.providers import (\n Address,\n BaseDataProvider,\n BaseProvider,\n Business,\n Choice,\n Clothing,\n Code,\n Cryptographic,\n Datetime,\n Development,\n File,\n Food,\n Generic,\n Hardware,\n Internet,\n Numbers,\n Path,\n Payment,\n Person,\n Science,\n Structure,\n Text,\n Transport,\n UnitSystem,\n)\n\n__all__ = [\n 'Address',\n 'BaseDataProvider',\n 'BaseProvider',\n 'Business',\n 'Clothing',\n 'Code',\n 'Choice',\n 'Datetime',\n 'Development',\n 'File',\n 'Food',\n 'Hardware',\n 'Internet',\n 'Numbers',\n 'Path',\n 'Payment',\n 'Person',\n 'Science',\n 'Structure',\n 'Text',\n 'Transport',\n 'UnitSystem',\n 'Cryptographic',\n\n # Has all:\n 'Generic',\n\n # Meta:\n '__version__',\n '__title__',\n '__description__',\n '__url__',\n '__author__',\n '__author_email__',\n '__license__',\n]\n\n__version__ = '4.1.0'\n__title__ = 'mimesis'\n__description__ = 'Mimesis: fake data generator.'\n__url__ = 'https://github.com/lk-geimfari/mimesis'\n__author__ = 'Isaak Uchakaev (Likid Geimfari)'\n__author_email__ = '[email protected]'\n__license__ = 'MIT License'\n", "path": "mimesis/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import Optional, Union\nfrom uuid import UUID\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n def uuid(self, version: int = None,\n as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID.\n\n This method returns string by default,\n but yoy can make it return uuid.UUID object using\n parameter **as_object**\n\n :param as_object: Returns uuid.UUID object instead of string.\n :param version: UUID version.\n :return: UUID.\n \"\"\"\n bits = self.random.getrandbits(128)\n uuid_obj = UUID(int=bits, version=version)\n\n if not as_object:\n return str(uuid_obj)\n\n return uuid_obj\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, 
pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}]}
| 1,981 | 461 |
gh_patches_debug_14282
|
rasdani/github-patches
|
git_diff
|
google__turbinia-384
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Local installation requires filled-out GCP section of config
I'm attempting to get Turbinia working locally on a laptop. The laptop has internet for at least the installation part of this process.
Running into an issue with the config. I set the TASK_MANAGER to use Celery, but the installation process is still requiring me to have values for:
- TURBINIA_PROJECT
- TURBINIA_ZONE
- TURBINIA_REGION
- BUCKET_NAME
According to the instructions in the config, these values should only matter if the TASK_MANAGER is set to PSQ.
--- END ISSUE ---
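For context, a minimal laptop-only configuration along the lines the reporter describes might look like the excerpt below. Variable names follow the shipped config template; whether `None` is actually accepted for the GCP block is precisely what this issue questions, so treat it as a sketch rather than a tested setup.
```python
# Illustrative excerpt of a local-only Turbinia config: Celery/Redis backends,
# GCP section left unset.
TASK_MANAGER = 'Celery'
STATE_MANAGER = 'Redis'

# GCP section intentionally unset for a laptop-only install.
TURBINIA_PROJECT = None
TURBINIA_ZONE = None
TURBINIA_REGION = None
BUCKET_NAME = None

# Celery / Redis on the same machine.
CELERY_BROKER = 'redis://localhost'
CELERY_BACKEND = 'redis://localhost'
REDIS_HOST = 'localhost'
REDIS_PORT = '6379'
REDIS_DB = '0'
```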
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/config/turbinia_config.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Turbinia Config Template"""
16
17 from __future__ import unicode_literals
18
19 ################################################################################
20 # Base Turbinia configuration
21 #
22 # All options in this section are required to be set to non-empty values.
23 ################################################################################
24
25 # A unique ID per Turbinia instance. Used to keep multiple Turbinia instances
26 # separate when running with the same Cloud projects or backend servers.
27 INSTANCE_ID = 'turbinia-instance1'
28
29 # Which state manager to use. Valid options are 'Datastore' or 'Redis'.
30 STATE_MANAGER = 'Datastore'
31
32 # Which Task manager to use. Valid options are 'PSQ' and 'Celery'.
33 TASK_MANAGER = 'PSQ'
34
35 # Default base output directory for worker results and evidence.
36 OUTPUT_DIR = '/var/tmp'
37
38 # Directory for temporary files. Some temporary files can be quite large (e.g.
39 # Plaso files can easily be multiple gigabytes), so make sure there is enough
40 # space. Nothing from this directory will be saved. This directory should be
41 # different from the OUTPUT_DIR.
42 TMP_DIR = '/tmp'
43
44 # File to log debugging output to.
45 LOG_FILE = '%s/turbinia.log' % OUTPUT_DIR
46
47 # Path to a lock file used for the worker tasks.
48 LOCK_FILE = '%s/turbinia-worker.lock' % OUTPUT_DIR
49
50 # Time in seconds to sleep in task management loops
51 SLEEP_TIME = 10
52
53 # Whether to run as a single run, or to keep server running indefinitely
54 SINGLE_RUN = False
55
56 # Local directory in the worker to put other mount directories for locally
57 # mounting images/disks
58 MOUNT_DIR_PREFIX = '/mnt/turbinia-mounts'
59
60 # This indicates whether the workers are running in an environment with a shared
61 # filesystem. This should be False for environments with workers running in
62 # GCE, and True for environments that have workers on dedicated machines with
63 # NFS or a SAN for storing Evidence objects.
64 SHARED_FILESYSTEM = False
65
66 # This will set debugging flags for processes executed by Tasks (for
67 # Tasks/binaries that support it). This could cause performance issues with
68 # some tasks, so it is recommended to only set this to True when debugging
69 # problems.
70 DEBUG_TASKS = False
71
72 ################################################################################
73 # Google Cloud Platform (GCP)
74 #
75 # Options in this section are required if the TASK_MANAGER is set to 'PSQ'.
76 ################################################################################
77
78 # GCP project, region and zone where Turbinia will run. Note that Turbinia does
79 # not currently support multi-zone operation. Even if you are running Turbinia
80 # in Hybrid mode (with the Server and Workers running on local machines), you
81 # will still need to provide these three parameters.
82 TURBINIA_PROJECT = None
83 TURBINIA_ZONE = None
84 TURBINIA_REGION = None
85
86 # GCS bucket that has Turbinia specific scripts and can be used to store logs.
87 # This must be globally unique within GCP.
88 BUCKET_NAME = None
89
90 # This is the internal PubSub topic that PSQ will use. This should be different
91 # than the PUBSUB_TOPIC variable. The actual PubSub topic created will be this
92 # variable prefixed with 'psq-'.
93 PSQ_TOPIC = 'turbinia-psq'
94
95 # The PubSub topic Turbinia will listen on for new requests. This should be
96 # different than the PSQ_TOPIC variable.
97 PUBSUB_TOPIC = INSTANCE_ID
98
99 # GCS Path to copy worker results and Evidence output to.
100 # Otherwise, set this as 'None' if output will be stored in shared storage.
101 # GCS_OUTPUT_PATH = 'gs://%s/output' % BUCKET_NAME
102 GCS_OUTPUT_PATH = None
103
104 ################################################################################
105 # Celery / Redis / Kombu
106 #
107 # Options in this section are required if TASK_MANAGER is set to 'Celery'
108 ################################################################################
109
110 # Method for communication between nodes
111 CELERY_BROKER = 'redis://localhost'
112
113 # Storage for task results/status
114 CELERY_BACKEND = 'redis://localhost'
115
116 # Can be the same as CELERY_BROKER
117 KOMBU_BROKER = CELERY_BROKER
118
119 # Used to namespace communications.
120 KOMBU_CHANNEL = '%s-kombu' % INSTANCE_ID
121
122 # Will messages be persistent and require acknowledgment?
123 # http://docs.celeryproject.org/projects/kombu/en/4.0/reference/kombu.html#kombu.Connection.SimpleBuffer
124 KOMBU_DURABLE = True
125
126 # Use Redis for state management
127 REDIS_HOST = 'localhost'
128 REDIS_PORT = '6379'
129 REDIS_DB = '0'
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/config/turbinia_config.py b/turbinia/config/turbinia_config.py
--- a/turbinia/config/turbinia_config.py
+++ b/turbinia/config/turbinia_config.py
@@ -26,10 +26,14 @@
# separate when running with the same Cloud projects or backend servers.
INSTANCE_ID = 'turbinia-instance1'
-# Which state manager to use. Valid options are 'Datastore' or 'Redis'.
+# Which state manager to use. Valid options are 'Datastore' or 'Redis'. Use
+# 'Datastore' for Cloud (GCP) or hybrid installations, and 'Redis' for local
+# installations.
STATE_MANAGER = 'Datastore'
-# Which Task manager to use. Valid options are 'PSQ' and 'Celery'.
+# Which Task manager to use. Valid options are 'PSQ' and 'Celery'. Use 'PSQ'
+# for Cloud (GCP) or hybrid installations, and 'Celery' for local
+# installations.
TASK_MANAGER = 'PSQ'
# Default base output directory for worker results and evidence.
|
{"golden_diff": "diff --git a/turbinia/config/turbinia_config.py b/turbinia/config/turbinia_config.py\n--- a/turbinia/config/turbinia_config.py\n+++ b/turbinia/config/turbinia_config.py\n@@ -26,10 +26,14 @@\n # separate when running with the same Cloud projects or backend servers.\n INSTANCE_ID = 'turbinia-instance1'\n \n-# Which state manager to use. Valid options are 'Datastore' or 'Redis'.\n+# Which state manager to use. Valid options are 'Datastore' or 'Redis'. Use\n+# 'Datastore' for Cloud (GCP) or hybrid installations, and 'Redis' for local\n+# installations.\n STATE_MANAGER = 'Datastore'\n \n-# Which Task manager to use. Valid options are 'PSQ' and 'Celery'.\n+# Which Task manager to use. Valid options are 'PSQ' and 'Celery'. Use 'PSQ'\n+# for Cloud (GCP) or hybrid installations, and 'Celery' for local\n+# installations.\n TASK_MANAGER = 'PSQ'\n \n # Default base output directory for worker results and evidence.\n", "issue": "Local installation requires filled-out GCP section of config\nI'm attempting to get Turbinia working locally on a laptop. The laptop has internet for at least the installation part of this process.\r\n\r\nRunning into an issue with the config. I set the TASK_MANAGER to use Celery, but the installation process is still requiring me to have values for:\r\n\r\n- TURBINIA_PROJECT\r\n- TURBINIA_ZONE\r\n- TURBINIA_REGION\r\n- BUCKET_NAME\r\n\r\nAccording to the instructions in the config, these values should only matter if the TASK_MANAGER is set to PSQ.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Turbinia Config Template\"\"\"\n\nfrom __future__ import unicode_literals\n\n################################################################################\n# Base Turbinia configuration\n#\n# All options in this section are required to be set to non-empty values.\n################################################################################\n\n# A unique ID per Turbinia instance. Used to keep multiple Turbinia instances\n# separate when running with the same Cloud projects or backend servers.\nINSTANCE_ID = 'turbinia-instance1'\n\n# Which state manager to use. Valid options are 'Datastore' or 'Redis'.\nSTATE_MANAGER = 'Datastore'\n\n# Which Task manager to use. Valid options are 'PSQ' and 'Celery'.\nTASK_MANAGER = 'PSQ'\n\n# Default base output directory for worker results and evidence.\nOUTPUT_DIR = '/var/tmp'\n\n# Directory for temporary files. Some temporary files can be quite large (e.g.\n# Plaso files can easily be multiple gigabytes), so make sure there is enough\n# space. Nothing from this directory will be saved. 
This directory should be\n# different from the OUTPUT_DIR.\nTMP_DIR = '/tmp'\n\n# File to log debugging output to.\nLOG_FILE = '%s/turbinia.log' % OUTPUT_DIR\n\n# Path to a lock file used for the worker tasks.\nLOCK_FILE = '%s/turbinia-worker.lock' % OUTPUT_DIR\n\n# Time in seconds to sleep in task management loops\nSLEEP_TIME = 10\n\n# Whether to run as a single run, or to keep server running indefinitely\nSINGLE_RUN = False\n\n# Local directory in the worker to put other mount directories for locally\n# mounting images/disks\nMOUNT_DIR_PREFIX = '/mnt/turbinia-mounts'\n\n# This indicates whether the workers are running in an environment with a shared\n# filesystem. This should be False for environments with workers running in\n# GCE, and True for environments that have workers on dedicated machines with\n# NFS or a SAN for storing Evidence objects.\nSHARED_FILESYSTEM = False\n\n# This will set debugging flags for processes executed by Tasks (for\n# Tasks/binaries that support it). This could cause performance issues with\n# some tasks, so it is recommended to only set this to True when debugging\n# problems.\nDEBUG_TASKS = False\n\n################################################################################\n# Google Cloud Platform (GCP)\n#\n# Options in this section are required if the TASK_MANAGER is set to 'PSQ'.\n################################################################################\n\n# GCP project, region and zone where Turbinia will run. Note that Turbinia does\n# not currently support multi-zone operation. Even if you are running Turbinia\n# in Hybrid mode (with the Server and Workers running on local machines), you\n# will still need to provide these three parameters.\nTURBINIA_PROJECT = None\nTURBINIA_ZONE = None\nTURBINIA_REGION = None\n\n# GCS bucket that has Turbinia specific scripts and can be used to store logs.\n# This must be globally unique within GCP.\nBUCKET_NAME = None\n\n# This is the internal PubSub topic that PSQ will use. This should be different\n# than the PUBSUB_TOPIC variable. The actual PubSub topic created will be this\n# variable prefixed with 'psq-'.\nPSQ_TOPIC = 'turbinia-psq'\n\n# The PubSub topic Turbinia will listen on for new requests. 
This should be\n# different than the PSQ_TOPIC variable.\nPUBSUB_TOPIC = INSTANCE_ID\n\n# GCS Path to copy worker results and Evidence output to.\n# Otherwise, set this as 'None' if output will be stored in shared storage.\n# GCS_OUTPUT_PATH = 'gs://%s/output' % BUCKET_NAME\nGCS_OUTPUT_PATH = None\n\n################################################################################\n# Celery / Redis / Kombu\n#\n# Options in this section are required if TASK_MANAGER is set to 'Celery'\n################################################################################\n\n# Method for communication between nodes\nCELERY_BROKER = 'redis://localhost'\n\n# Storage for task results/status\nCELERY_BACKEND = 'redis://localhost'\n\n# Can be the same as CELERY_BROKER\nKOMBU_BROKER = CELERY_BROKER\n\n# Used to namespace communications.\nKOMBU_CHANNEL = '%s-kombu' % INSTANCE_ID\n\n# Will messages be persistent and require acknowledgment?\n# http://docs.celeryproject.org/projects/kombu/en/4.0/reference/kombu.html#kombu.Connection.SimpleBuffer\nKOMBU_DURABLE = True\n\n# Use Redis for state management\nREDIS_HOST = 'localhost'\nREDIS_PORT = '6379'\nREDIS_DB = '0'\n", "path": "turbinia/config/turbinia_config.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Turbinia Config Template\"\"\"\n\nfrom __future__ import unicode_literals\n\n################################################################################\n# Base Turbinia configuration\n#\n# All options in this section are required to be set to non-empty values.\n################################################################################\n\n# A unique ID per Turbinia instance. Used to keep multiple Turbinia instances\n# separate when running with the same Cloud projects or backend servers.\nINSTANCE_ID = 'turbinia-instance1'\n\n# Which state manager to use. Valid options are 'Datastore' or 'Redis'. Use\n# 'Datastore' for Cloud (GCP) or hybrid installations, and 'Redis' for local\n# installations.\nSTATE_MANAGER = 'Datastore'\n\n# Which Task manager to use. Valid options are 'PSQ' and 'Celery'. Use 'PSQ'\n# for Cloud (GCP) or hybrid installations, and 'Celery' for local\n# installations.\nTASK_MANAGER = 'PSQ'\n\n# Default base output directory for worker results and evidence.\nOUTPUT_DIR = '/var/tmp'\n\n# Directory for temporary files. Some temporary files can be quite large (e.g.\n# Plaso files can easily be multiple gigabytes), so make sure there is enough\n# space. Nothing from this directory will be saved. 
This directory should be\n# different from the OUTPUT_DIR.\nTMP_DIR = '/tmp'\n\n# File to log debugging output to.\nLOG_FILE = '%s/turbinia.log' % OUTPUT_DIR\n\n# Path to a lock file used for the worker tasks.\nLOCK_FILE = '%s/turbinia-worker.lock' % OUTPUT_DIR\n\n# Time in seconds to sleep in task management loops\nSLEEP_TIME = 10\n\n# Whether to run as a single run, or to keep server running indefinitely\nSINGLE_RUN = False\n\n# Local directory in the worker to put other mount directories for locally\n# mounting images/disks\nMOUNT_DIR_PREFIX = '/mnt/turbinia-mounts'\n\n# This indicates whether the workers are running in an environment with a shared\n# filesystem. This should be False for environments with workers running in\n# GCE, and True for environments that have workers on dedicated machines with\n# NFS or a SAN for storing Evidence objects.\nSHARED_FILESYSTEM = False\n\n# This will set debugging flags for processes executed by Tasks (for\n# Tasks/binaries that support it). This could cause performance issues with\n# some tasks, so it is recommended to only set this to True when debugging\n# problems.\nDEBUG_TASKS = False\n\n################################################################################\n# Google Cloud Platform (GCP)\n#\n# Options in this section are required if the TASK_MANAGER is set to 'PSQ'.\n################################################################################\n\n# GCP project, region and zone where Turbinia will run. Note that Turbinia does\n# not currently support multi-zone operation. Even if you are running Turbinia\n# in Hybrid mode (with the Server and Workers running on local machines), you\n# will still need to provide these three parameters.\nTURBINIA_PROJECT = None\nTURBINIA_ZONE = None\nTURBINIA_REGION = None\n\n# GCS bucket that has Turbinia specific scripts and can be used to store logs.\n# This must be globally unique within GCP.\nBUCKET_NAME = None\n\n# This is the internal PubSub topic that PSQ will use. This should be different\n# than the PUBSUB_TOPIC variable. The actual PubSub topic created will be this\n# variable prefixed with 'psq-'.\nPSQ_TOPIC = 'turbinia-psq'\n\n# The PubSub topic Turbinia will listen on for new requests. This should be\n# different than the PSQ_TOPIC variable.\nPUBSUB_TOPIC = INSTANCE_ID\n\n# GCS Path to copy worker results and Evidence output to.\n# Otherwise, set this as 'None' if output will be stored in shared storage.\n# GCS_OUTPUT_PATH = 'gs://%s/output' % BUCKET_NAME\nGCS_OUTPUT_PATH = None\n\n################################################################################\n# Celery / Redis / Kombu\n#\n# Options in this section are required if TASK_MANAGER is set to 'Celery'\n################################################################################\n\n# Method for communication between nodes\nCELERY_BROKER = 'redis://localhost'\n\n# Storage for task results/status\nCELERY_BACKEND = 'redis://localhost'\n\n# Can be the same as CELERY_BROKER\nKOMBU_BROKER = CELERY_BROKER\n\n# Used to namespace communications.\nKOMBU_CHANNEL = '%s-kombu' % INSTANCE_ID\n\n# Will messages be persistent and require acknowledgment?\n# http://docs.celeryproject.org/projects/kombu/en/4.0/reference/kombu.html#kombu.Connection.SimpleBuffer\nKOMBU_DURABLE = True\n\n# Use Redis for state management\nREDIS_HOST = 'localhost'\nREDIS_PORT = '6379'\nREDIS_DB = '0'\n", "path": "turbinia/config/turbinia_config.py"}]}
| 1,798 | 253 |
gh_patches_debug_37430 | rasdani/github-patches | git_diff | streamlink__streamlink-4638 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.vk: fixes required
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Needs support for the web application firewall cookie.
~Seems necessary to add all parameters to the API POST data in `_get_streams()` now.~
ref: https://github.com/streamlink/streamlink/pull/4613#issuecomment-1173040359
### Debug log
```text
$ streamlink -l debug https://vk.com/video-211154316_456239169
[cli][debug] OS: Linux-4.9.0-18-amd64-x86_64-with-debian-9.13
[cli][debug] Python: 3.7.3
[cli][debug] Streamlink: 4.1.0+45.gb277114d
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.3.3
[cli][debug] importlib-metadata: 4.12.0
[cli][debug] Arguments:
[cli][debug] url=https://vk.com/video-211154316_456239169
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin vk for URL https://vk.com/video-211154316_456239169
[plugins.vk][debug] video ID: -211154316_456239169
[plugins.vk][error] Could not parse API response
error: No playable streams found on this URL: https://vk.com/video-211154316_456239169
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vk.py`
Content:
```
1 """
2 $description Russian live-streaming and video hosting social platform.
3 $url vk.com
4 $type live, vod
5 """
6
7 import logging
8 import re
9 from urllib.parse import parse_qsl, unquote, urlparse
10
11 from streamlink.exceptions import NoStreamsError
12 from streamlink.plugin import Plugin, PluginError, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream.dash import DASHStream
15 from streamlink.stream.hls import HLSStream
16
17 log = logging.getLogger(__name__)
18
19
20 @pluginmatcher(re.compile(
21 r"https?://(?:\w+\.)?vk\.com/videos?(?:\?z=video)?(?P<video_id>-?\d+_\d+)"
22 ))
23 @pluginmatcher(re.compile(
24 r"https?://(\w+\.)?vk\.com/.+"
25 ))
26 class VK(Plugin):
27 API_URL = "https://vk.com/al_video.php"
28
29 def _has_video_id(self):
30 return any(m for m in self.matches[:-1])
31
32 def follow_vk_redirect(self):
33 if self._has_video_id():
34 return
35
36 try:
37 parsed_url = urlparse(self.url)
38 true_path = next(unquote(v).split("/")[0] for k, v in parse_qsl(parsed_url.query) if k == "z" and len(v) > 0)
39 self.url = f"{parsed_url.scheme}://{parsed_url.netloc}/{true_path}"
40 if self._has_video_id():
41 return
42 except StopIteration:
43 pass
44
45 try:
46 self.url = self.session.http.get(self.url, schema=validate.Schema(
47 validate.parse_html(),
48 validate.xml_xpath_string(".//head/meta[@property='og:url'][@content]/@content"),
49 str
50 ))
51 except PluginError:
52 pass
53 if self._has_video_id():
54 return
55
56 raise NoStreamsError(self.url)
57
58 def _get_streams(self):
59 self.follow_vk_redirect()
60
61 video_id = self.match.group("video_id")
62 if not video_id:
63 return
64
65 log.debug(f"video ID: {video_id}")
66 try:
67 data = self.session.http.post(
68 self.API_URL,
69 params={
70 "act": "show",
71 "al": "1",
72 "video": video_id,
73 },
74 headers={"Referer": self.url},
75 schema=validate.Schema(
76 validate.transform(lambda text: re.sub(r"^\s*<!--\s*", "", text)),
77 validate.parse_json(),
78 {"payload": list},
79 validate.get(("payload", -1)),
80 list,
81 validate.get(-1),
82 {"player": {"params": [dict]}},
83 validate.get(("player", "params", 0)),
84 {
85 validate.optional("hls"): validate.url(),
86 validate.optional("manifest"): validate.startswith("<?xml"),
87 validate.optional("md_author"): validate.any(str, None),
88 validate.optional("md_title"): validate.any(str, None),
89 }
90 )
91 )
92 except PluginError:
93 log.error("Could not parse API response")
94 return
95
96 self.id = video_id
97 self.author = data.get("md_author")
98 self.title = data.get("md_title")
99
100 hls = data.get("hls")
101 if hls:
102 return HLSStream.parse_variant_playlist(self.session, hls)
103
104 dash_manifest = data.get("manifest")
105 if dash_manifest:
106 return DASHStream.parse_manifest(self.session, dash_manifest)
107
108
109 __plugin__ = VK
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/vk.py b/src/streamlink/plugins/vk.py
--- a/src/streamlink/plugins/vk.py
+++ b/src/streamlink/plugins/vk.py
@@ -6,6 +6,7 @@
import logging
import re
+from hashlib import md5
from urllib.parse import parse_qsl, unquote, urlparse
from streamlink.exceptions import NoStreamsError
@@ -13,6 +14,7 @@
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
+from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
@@ -25,6 +27,22 @@
))
class VK(Plugin):
API_URL = "https://vk.com/al_video.php"
+ HASH_COOKIE = "hash429"
+
+ def _get_cookies(self):
+ def on_response(res, **kwargs):
+ if res.headers.get("x-waf-redirect") == "1":
+ if not res.headers.get("X-WAF-Backend-Status"):
+ log.debug("Getting WAF cookie")
+ cookie = res.cookies.get(self.HASH_COOKIE)
+ key = md5(cookie.encode("utf-8")).hexdigest()
+ res.headers["Location"] = update_qsd(res.headers["Location"], qsd={"key": key})
+ return res
+ elif res.headers.get("X-WAF-Backend-Status") == "challenge_success":
+ self.session.http.cookies.update(res.cookies)
+ return res
+
+ self.session.http.get("https://vk.com/", hooks={"response": on_response})
def _has_video_id(self):
return any(m for m in self.matches[:-1])
@@ -56,21 +74,19 @@
raise NoStreamsError(self.url)
def _get_streams(self):
+ self._get_cookies()
self.follow_vk_redirect()
video_id = self.match.group("video_id")
if not video_id:
return
- log.debug(f"video ID: {video_id}")
+ log.debug(f"Video ID: {video_id}")
try:
data = self.session.http.post(
self.API_URL,
- params={
- "act": "show",
- "al": "1",
- "video": video_id,
- },
+ params={"act": "show"},
+ data={"act": "show", "al": "1", "video": video_id},
headers={"Referer": self.url},
schema=validate.Schema(
validate.transform(lambda text: re.sub(r"^\s*<!--\s*", "", text)),
|
{"golden_diff": "diff --git a/src/streamlink/plugins/vk.py b/src/streamlink/plugins/vk.py\n--- a/src/streamlink/plugins/vk.py\n+++ b/src/streamlink/plugins/vk.py\n@@ -6,6 +6,7 @@\n \n import logging\n import re\n+from hashlib import md5\n from urllib.parse import parse_qsl, unquote, urlparse\n \n from streamlink.exceptions import NoStreamsError\n@@ -13,6 +14,7 @@\n from streamlink.plugin.api import validate\n from streamlink.stream.dash import DASHStream\n from streamlink.stream.hls import HLSStream\n+from streamlink.utils.url import update_qsd\n \n log = logging.getLogger(__name__)\n \n@@ -25,6 +27,22 @@\n ))\n class VK(Plugin):\n API_URL = \"https://vk.com/al_video.php\"\n+ HASH_COOKIE = \"hash429\"\n+\n+ def _get_cookies(self):\n+ def on_response(res, **kwargs):\n+ if res.headers.get(\"x-waf-redirect\") == \"1\":\n+ if not res.headers.get(\"X-WAF-Backend-Status\"):\n+ log.debug(\"Getting WAF cookie\")\n+ cookie = res.cookies.get(self.HASH_COOKIE)\n+ key = md5(cookie.encode(\"utf-8\")).hexdigest()\n+ res.headers[\"Location\"] = update_qsd(res.headers[\"Location\"], qsd={\"key\": key})\n+ return res\n+ elif res.headers.get(\"X-WAF-Backend-Status\") == \"challenge_success\":\n+ self.session.http.cookies.update(res.cookies)\n+ return res\n+\n+ self.session.http.get(\"https://vk.com/\", hooks={\"response\": on_response})\n \n def _has_video_id(self):\n return any(m for m in self.matches[:-1])\n@@ -56,21 +74,19 @@\n raise NoStreamsError(self.url)\n \n def _get_streams(self):\n+ self._get_cookies()\n self.follow_vk_redirect()\n \n video_id = self.match.group(\"video_id\")\n if not video_id:\n return\n \n- log.debug(f\"video ID: {video_id}\")\n+ log.debug(f\"Video ID: {video_id}\")\n try:\n data = self.session.http.post(\n self.API_URL,\n- params={\n- \"act\": \"show\",\n- \"al\": \"1\",\n- \"video\": video_id,\n- },\n+ params={\"act\": \"show\"},\n+ data={\"act\": \"show\", \"al\": \"1\", \"video\": video_id},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.transform(lambda text: re.sub(r\"^\\s*<!--\\s*\", \"\", text)),\n", "issue": "plugins.vk: fixes required\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest build from the master branch\r\n\r\n### Description\r\n\r\nNeeds support for the web application firewall cookie.\r\n\r\n~Seems necessary to add all parameters to the API POST data in `_get_streams()` now.~\r\n\r\nref: https://github.com/streamlink/streamlink/pull/4613#issuecomment-1173040359\r\n\r\n\r\n### Debug log\r\n\r\n```text\r\n$ streamlink -l debug https://vk.com/video-211154316_456239169\r\n[cli][debug] OS: Linux-4.9.0-18-amd64-x86_64-with-debian-9.13\r\n[cli][debug] Python: 3.7.3\r\n[cli][debug] Streamlink: 4.1.0+45.gb277114d\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.3.3\r\n[cli][debug] importlib-metadata: 4.12.0\r\n[cli][debug] 
Arguments:\r\n[cli][debug] url=https://vk.com/video-211154316_456239169\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin vk for URL https://vk.com/video-211154316_456239169\r\n[plugins.vk][debug] video ID: -211154316_456239169\r\n[plugins.vk][error] Could not parse API response\r\nerror: No playable streams found on this URL: https://vk.com/video-211154316_456239169\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Russian live-streaming and video hosting social platform.\n$url vk.com\n$type live, vod\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import parse_qsl, unquote, urlparse\n\nfrom streamlink.exceptions import NoStreamsError\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:\\w+\\.)?vk\\.com/videos?(?:\\?z=video)?(?P<video_id>-?\\d+_\\d+)\"\n))\n@pluginmatcher(re.compile(\n r\"https?://(\\w+\\.)?vk\\.com/.+\"\n))\nclass VK(Plugin):\n API_URL = \"https://vk.com/al_video.php\"\n\n def _has_video_id(self):\n return any(m for m in self.matches[:-1])\n\n def follow_vk_redirect(self):\n if self._has_video_id():\n return\n\n try:\n parsed_url = urlparse(self.url)\n true_path = next(unquote(v).split(\"/\")[0] for k, v in parse_qsl(parsed_url.query) if k == \"z\" and len(v) > 0)\n self.url = f\"{parsed_url.scheme}://{parsed_url.netloc}/{true_path}\"\n if self._has_video_id():\n return\n except StopIteration:\n pass\n\n try:\n self.url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//head/meta[@property='og:url'][@content]/@content\"),\n str\n ))\n except PluginError:\n pass\n if self._has_video_id():\n return\n\n raise NoStreamsError(self.url)\n\n def _get_streams(self):\n self.follow_vk_redirect()\n\n video_id = self.match.group(\"video_id\")\n if not video_id:\n return\n\n log.debug(f\"video ID: {video_id}\")\n try:\n data = self.session.http.post(\n self.API_URL,\n params={\n \"act\": \"show\",\n \"al\": \"1\",\n \"video\": video_id,\n },\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.transform(lambda text: re.sub(r\"^\\s*<!--\\s*\", \"\", text)),\n validate.parse_json(),\n {\"payload\": list},\n validate.get((\"payload\", -1)),\n list,\n validate.get(-1),\n {\"player\": {\"params\": [dict]}},\n validate.get((\"player\", \"params\", 0)),\n {\n validate.optional(\"hls\"): validate.url(),\n validate.optional(\"manifest\"): validate.startswith(\"<?xml\"),\n validate.optional(\"md_author\"): validate.any(str, None),\n validate.optional(\"md_title\"): validate.any(str, None),\n }\n )\n )\n except PluginError:\n log.error(\"Could not parse API response\")\n return\n\n self.id = video_id\n self.author = data.get(\"md_author\")\n self.title = data.get(\"md_title\")\n\n hls = data.get(\"hls\")\n if hls:\n return HLSStream.parse_variant_playlist(self.session, hls)\n\n dash_manifest = data.get(\"manifest\")\n if dash_manifest:\n return DASHStream.parse_manifest(self.session, dash_manifest)\n\n\n__plugin__ = VK\n", "path": "src/streamlink/plugins/vk.py"}], "after_files": [{"content": "\"\"\"\n$description Russian live-streaming and video hosting social platform.\n$url vk.com\n$type live, vod\n\"\"\"\n\nimport logging\nimport re\nfrom hashlib import md5\nfrom urllib.parse import parse_qsl, unquote, urlparse\n\nfrom 
streamlink.exceptions import NoStreamsError\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_qsd\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:\\w+\\.)?vk\\.com/videos?(?:\\?z=video)?(?P<video_id>-?\\d+_\\d+)\"\n))\n@pluginmatcher(re.compile(\n r\"https?://(\\w+\\.)?vk\\.com/.+\"\n))\nclass VK(Plugin):\n API_URL = \"https://vk.com/al_video.php\"\n HASH_COOKIE = \"hash429\"\n\n def _get_cookies(self):\n def on_response(res, **kwargs):\n if res.headers.get(\"x-waf-redirect\") == \"1\":\n if not res.headers.get(\"X-WAF-Backend-Status\"):\n log.debug(\"Getting WAF cookie\")\n cookie = res.cookies.get(self.HASH_COOKIE)\n key = md5(cookie.encode(\"utf-8\")).hexdigest()\n res.headers[\"Location\"] = update_qsd(res.headers[\"Location\"], qsd={\"key\": key})\n return res\n elif res.headers.get(\"X-WAF-Backend-Status\") == \"challenge_success\":\n self.session.http.cookies.update(res.cookies)\n return res\n\n self.session.http.get(\"https://vk.com/\", hooks={\"response\": on_response})\n\n def _has_video_id(self):\n return any(m for m in self.matches[:-1])\n\n def follow_vk_redirect(self):\n if self._has_video_id():\n return\n\n try:\n parsed_url = urlparse(self.url)\n true_path = next(unquote(v).split(\"/\")[0] for k, v in parse_qsl(parsed_url.query) if k == \"z\" and len(v) > 0)\n self.url = f\"{parsed_url.scheme}://{parsed_url.netloc}/{true_path}\"\n if self._has_video_id():\n return\n except StopIteration:\n pass\n\n try:\n self.url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//head/meta[@property='og:url'][@content]/@content\"),\n str\n ))\n except PluginError:\n pass\n if self._has_video_id():\n return\n\n raise NoStreamsError(self.url)\n\n def _get_streams(self):\n self._get_cookies()\n self.follow_vk_redirect()\n\n video_id = self.match.group(\"video_id\")\n if not video_id:\n return\n\n log.debug(f\"Video ID: {video_id}\")\n try:\n data = self.session.http.post(\n self.API_URL,\n params={\"act\": \"show\"},\n data={\"act\": \"show\", \"al\": \"1\", \"video\": video_id},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.transform(lambda text: re.sub(r\"^\\s*<!--\\s*\", \"\", text)),\n validate.parse_json(),\n {\"payload\": list},\n validate.get((\"payload\", -1)),\n list,\n validate.get(-1),\n {\"player\": {\"params\": [dict]}},\n validate.get((\"player\", \"params\", 0)),\n {\n validate.optional(\"hls\"): validate.url(),\n validate.optional(\"manifest\"): validate.startswith(\"<?xml\"),\n validate.optional(\"md_author\"): validate.any(str, None),\n validate.optional(\"md_title\"): validate.any(str, None),\n }\n )\n )\n except PluginError:\n log.error(\"Could not parse API response\")\n return\n\n self.id = video_id\n self.author = data.get(\"md_author\")\n self.title = data.get(\"md_title\")\n\n hls = data.get(\"hls\")\n if hls:\n return HLSStream.parse_variant_playlist(self.session, hls)\n\n dash_manifest = data.get(\"manifest\")\n if dash_manifest:\n return DASHStream.parse_manifest(self.session, dash_manifest)\n\n\n__plugin__ = VK\n", "path": "src/streamlink/plugins/vk.py"}]}
| 1,876 | 584 |
gh_patches_debug_6189 | rasdani/github-patches | git_diff | facebookresearch__hydra-594 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fish completion is not working
Fish completion does not seem to work:
```
$ python my_app.py -sc install=fish | source
$ python my_app.py <TAB> string split: Unknown option “-n”
- (line 2):
string split -n ' ' $COMP_LINE
^
in command substitution
called on line 0 of file -
in function “hydra_fish_completion”
called on standard input
in command substitution
called on standard input
stringstring - manipulate strings
-
Synopsis
string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]
string join [(-q | --quiet)] SEP [STRING...]
string length [(-q | --quiet)] [STRING...]
string lower [(-q | --quiet)] [STRING...]
string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]
[(-n | --index)] [(-q |
```
Fish completion is not working
Fish completion does not seem to work:
```
$ python my_app.py -sc install=fish | source
$ python my_app.py <TAB> string split: Unknown option “-n”
- (line 2):
string split -n ' ' $COMP_LINE
^
in command substitution
called on line 0 of file -
in function “hydra_fish_completion”
called on standard input
in command substitution
called on standard input
stringstring - manipulate strings
-
Synopsis
string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]
string join [(-q | --quiet)] SEP [STRING...]
string length [(-q | --quiet)] [STRING...]
string lower [(-q | --quiet)] [STRING...]
string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]
[(-n | --index)] [(-q |
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/_internal/core_plugins/fish_completion.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 import os
4 import sys
5 from typing import List, Optional, Tuple
6
7 from hydra.plugins.completion_plugin import CompletionPlugin
8
9 log = logging.getLogger(__name__)
10
11
12 class FishCompletion(CompletionPlugin):
13 def install(self) -> None:
14 script = """function hydra_fish_completion
15 # Hydra will access COMP_LINE to generate completion candidates
16 set -lx COMP_LINE (commandline -cp)
17
18 # Find out how to call the underlying script
19 set -l parts (string split -n ' ' $COMP_LINE)
20 if test "$parts[1]" = "python" -o "$parts[1]" = "python3"
21 set cmd "$parts[1] $parts[2]"
22 if not grep -q "@hydra.main" $parts[2]
23 return
24 end
25 else
26 set cmd "$parts[1]"
27 end
28
29 # Generate candidates
30 eval "$cmd -sc query=fish"
31 end
32 """
33 output = self._get_exec()
34 reg_cmd = []
35 for name, cond in output:
36 reg_cmd.append(
37 f"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\n"
38 )
39 print(script)
40 print("".join(reg_cmd))
41
42 def uninstall(self) -> None:
43 name = self._get_uninstall_exec()
44 print(f"complete -e -c {name}")
45 print("function hydra_fish_completion\nend")
46
47 @staticmethod
48 def provides() -> str:
49 return "fish"
50
51 def query(self, config_name: Optional[str]) -> None:
52 line = os.environ["COMP_LINE"]
53 line = self.strip_python_or_app_name(line)
54 print("\n".join(self._query(config_name=config_name, line=line)))
55
56 @staticmethod
57 def help(command: str) -> str:
58 assert command in ["install", "uninstall"]
59 return f"{{}} -sc {command}=fish | source"
60
61 @staticmethod
62 def _get_exec() -> List[Tuple[str, str]]:
63 # Running as an installed app (setuptools entry point)
64 output = []
65 # User scenario 1: python script.py
66 name = os.path.basename(sys.executable)
67 cond = f"-n '__fish_seen_subcommand_from {sys.argv[0]}' "
68 output.append((name, cond))
69
70 # User scenario 2: ./script.py or src/script.py or script.py
71 name = os.path.basename(sys.argv[0])
72 cond = ""
73 output.append((name, cond))
74
75 return output
76
77 @staticmethod
78 def _get_uninstall_exec() -> str:
79 name = os.path.basename(sys.argv[0])
80
81 return name
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py
--- a/hydra/_internal/core_plugins/fish_completion.py
+++ b/hydra/_internal/core_plugins/fish_completion.py
@@ -16,7 +16,7 @@
set -lx COMP_LINE (commandline -cp)
# Find out how to call the underlying script
- set -l parts (string split -n ' ' $COMP_LINE)
+ set -l parts (commandline -cpo)
if test "$parts[1]" = "python" -o "$parts[1]" = "python3"
set cmd "$parts[1] $parts[2]"
if not grep -q "@hydra.main" $parts[2]
|
{"golden_diff": "diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py\n--- a/hydra/_internal/core_plugins/fish_completion.py\n+++ b/hydra/_internal/core_plugins/fish_completion.py\n@@ -16,7 +16,7 @@\n set -lx COMP_LINE (commandline -cp)\n \n # Find out how to call the underlying script\n- set -l parts (string split -n ' ' $COMP_LINE)\n+ set -l parts (commandline -cpo)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n", "issue": "Fish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\nFish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\nimport sys\nfrom typing import List, Optional, Tuple\n\nfrom hydra.plugins.completion_plugin import CompletionPlugin\n\nlog = logging.getLogger(__name__)\n\n\nclass FishCompletion(CompletionPlugin):\n def install(self) -> None:\n script = \"\"\"function hydra_fish_completion\n # Hydra will access COMP_LINE to generate completion candidates\n set -lx COMP_LINE (commandline -cp)\n\n # Find out how to call the underlying script\n set -l parts (string split -n ' ' $COMP_LINE)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n return\n end\n else\n set cmd \"$parts[1]\"\n end\n\n # Generate candidates\n eval \"$cmd -sc query=fish\"\nend\n \"\"\"\n output = self._get_exec()\n reg_cmd = []\n for name, cond in output:\n reg_cmd.append(\n f\"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\\n\"\n )\n print(script)\n print(\"\".join(reg_cmd))\n\n def uninstall(self) -> None:\n name = self._get_uninstall_exec()\n print(f\"complete -e -c {name}\")\n print(\"function hydra_fish_completion\\nend\")\n\n @staticmethod\n def provides() -> str:\n return \"fish\"\n\n def query(self, config_name: Optional[str]) -> None:\n line = os.environ[\"COMP_LINE\"]\n line = self.strip_python_or_app_name(line)\n print(\"\\n\".join(self._query(config_name=config_name, line=line)))\n\n @staticmethod\n def help(command: str) -> str:\n assert command in [\"install\", \"uninstall\"]\n return f\"{{}} -sc {command}=fish | source\"\n\n @staticmethod\n def _get_exec() -> List[Tuple[str, str]]:\n # Running as an installed app (setuptools entry point)\n output = []\n # User scenario 1: python script.py\n name = os.path.basename(sys.executable)\n cond = f\"-n '__fish_seen_subcommand_from {sys.argv[0]}' \"\n output.append((name, cond))\n\n # User scenario 2: ./script.py or src/script.py or script.py\n name = os.path.basename(sys.argv[0])\n cond = \"\"\n output.append((name, cond))\n\n return output\n\n @staticmethod\n def _get_uninstall_exec() -> str:\n name = os.path.basename(sys.argv[0])\n\n return name\n", "path": "hydra/_internal/core_plugins/fish_completion.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\nimport sys\nfrom typing import List, Optional, Tuple\n\nfrom hydra.plugins.completion_plugin import CompletionPlugin\n\nlog = logging.getLogger(__name__)\n\n\nclass FishCompletion(CompletionPlugin):\n def install(self) -> None:\n script = \"\"\"function hydra_fish_completion\n # Hydra will access COMP_LINE to generate completion candidates\n set -lx COMP_LINE (commandline -cp)\n\n # Find out how to call the underlying script\n set -l parts (commandline -cpo)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n return\n end\n else\n set cmd \"$parts[1]\"\n end\n\n # Generate candidates\n eval \"$cmd -sc query=fish\"\nend\n \"\"\"\n output = self._get_exec()\n reg_cmd = []\n for name, cond in output:\n reg_cmd.append(\n f\"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\\n\"\n )\n print(script)\n print(\"\".join(reg_cmd))\n\n def uninstall(self) -> None:\n name = self._get_uninstall_exec()\n print(f\"complete -e -c {name}\")\n print(\"function hydra_fish_completion\\nend\")\n\n @staticmethod\n def provides() -> str:\n return \"fish\"\n\n def query(self, config_name: Optional[str]) -> None:\n line = os.environ[\"COMP_LINE\"]\n line = self.strip_python_or_app_name(line)\n print(\"\\n\".join(self._query(config_name=config_name, line=line)))\n\n @staticmethod\n def help(command: str) -> str:\n assert command in [\"install\", \"uninstall\"]\n return f\"{{}} -sc {command}=fish | source\"\n\n @staticmethod\n def _get_exec() -> List[Tuple[str, str]]:\n # Running as an installed app (setuptools entry point)\n output = []\n # User scenario 1: python script.py\n name = os.path.basename(sys.executable)\n cond = f\"-n '__fish_seen_subcommand_from {sys.argv[0]}' \"\n output.append((name, cond))\n\n # User scenario 2: ./script.py or src/script.py or script.py\n name = os.path.basename(sys.argv[0])\n cond = \"\"\n output.append((name, cond))\n\n return output\n\n @staticmethod\n def _get_uninstall_exec() -> str:\n name = os.path.basename(sys.argv[0])\n\n return name\n", "path": "hydra/_internal/core_plugins/fish_completion.py"}]}
| 1,507 | 175 |
gh_patches_debug_29530 | rasdani/github-patches | git_diff | Flexget__Flexget-1667 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding new column to `plugins` output
I'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ define a list of interfaces for themselves.
Shall I create a PR adding the column 'Interfaces' to the output?
I wanted to ask before I put the effort in.
And if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. ;)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/cli/plugins.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from colorclass.toggles import disable_all_colors
7 from flexget import options
8 from flexget.event import event
9 from flexget.plugin import get_plugins
10 from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize
11
12 log = logging.getLogger('plugins')
13
14
15 def plugins_summary(manager, options):
16 if options.table_type == 'porcelain':
17 disable_all_colors()
18 header = ['Keyword', 'Phases', 'Flags']
19 table_data = [header]
20 for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
21 if options.builtins and not plugin.builtin:
22 continue
23 flags = []
24 if plugin.instance.__doc__:
25 flags.append('doc')
26 if plugin.builtin:
27 flags.append('builtin')
28 if plugin.debug:
29 if not options.debug:
30 continue
31 flags.append('developers')
32 handlers = plugin.phase_handlers
33 roles = []
34 for phase in handlers:
35 priority = handlers[phase].priority
36 roles.append('{0}({1})'.format(phase, priority))
37
38 name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
39 table_data.append([name, ', '.join(roles), ', '.join(flags)])
40
41 try:
42 table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
43 console(table.output)
44 except TerminalTableError as e:
45 console('ERROR: %s' % str(e))
46 return
47 console(colorize('green', ' Built-in plugins'))
48
49
50 @event('options.register')
51 def register_parser_arguments():
52 parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',
53 parents=[table_parser])
54 parser.add_argument('--interface', help='Show plugins belonging to this interface')
55 parser.add_argument('--phase', help='Show plugins that act on this phase')
56 parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py
--- a/flexget/plugins/cli/plugins.py
+++ b/flexget/plugins/cli/plugins.py
@@ -15,11 +15,12 @@
def plugins_summary(manager, options):
if options.table_type == 'porcelain':
disable_all_colors()
- header = ['Keyword', 'Phases', 'Flags']
+ header = ['Keyword', 'Interfaces', 'Phases', 'Flags']
table_data = [header]
for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
if options.builtins and not plugin.builtin:
continue
+
flags = []
if plugin.instance.__doc__:
flags.append('doc')
@@ -29,6 +30,7 @@
if not options.debug:
continue
flags.append('developers')
+
handlers = plugin.phase_handlers
roles = []
for phase in handlers:
@@ -36,7 +38,7 @@
roles.append('{0}({1})'.format(phase, priority))
name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
- table_data.append([name, ', '.join(roles), ', '.join(flags)])
+ table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])
try:
table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
|
{"golden_diff": "diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py\n--- a/flexget/plugins/cli/plugins.py\n+++ b/flexget/plugins/cli/plugins.py\n@@ -15,11 +15,12 @@\n def plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n- header = ['Keyword', 'Phases', 'Flags']\n+ header = ['Keyword', 'Interfaces', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n+\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n@@ -29,6 +30,7 @@\n if not options.debug:\n continue\n flags.append('developers')\n+\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n@@ -36,7 +38,7 @@\n roles.append('{0}({1})'.format(phase, priority))\n \n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n- table_data.append([name, ', '.join(roles), ', '.join(flags)])\n+ table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])\n \n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n", "issue": "Adding new column to `plugins` output\nI'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ defined a list of interfaces for themselves. \r\n\r\nShall I create a PR adding the column 'Interfaces' to the output?\r\nI wanted to ask before I put the effort in.\r\n\r\nAnd if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. 
;)\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom colorclass.toggles import disable_all_colors\nfrom flexget import options\nfrom flexget.event import event\nfrom flexget.plugin import get_plugins\nfrom flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize\n\nlog = logging.getLogger('plugins')\n\n\ndef plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n header = ['Keyword', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n if plugin.builtin:\n flags.append('builtin')\n if plugin.debug:\n if not options.debug:\n continue\n flags.append('developers')\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n priority = handlers[phase].priority\n roles.append('{0}({1})'.format(phase, priority))\n\n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n table_data.append([name, ', '.join(roles), ', '.join(flags)])\n\n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n console(table.output)\n except TerminalTableError as e:\n console('ERROR: %s' % str(e))\n return\n console(colorize('green', ' Built-in plugins'))\n\n\n@event('options.register')\ndef register_parser_arguments():\n parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',\n parents=[table_parser])\n parser.add_argument('--interface', help='Show plugins belonging to this interface')\n parser.add_argument('--phase', help='Show plugins that act on this phase')\n parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')\n", "path": "flexget/plugins/cli/plugins.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom colorclass.toggles import disable_all_colors\nfrom flexget import options\nfrom flexget.event import event\nfrom flexget.plugin import get_plugins\nfrom flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize\n\nlog = logging.getLogger('plugins')\n\n\ndef plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n header = ['Keyword', 'Interfaces', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n if plugin.builtin:\n flags.append('builtin')\n if plugin.debug:\n if not options.debug:\n continue\n flags.append('developers')\n\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n priority = handlers[phase].priority\n roles.append('{0}({1})'.format(phase, priority))\n\n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])\n\n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n console(table.output)\n except TerminalTableError as e:\n 
console('ERROR: %s' % str(e))\n return\n console(colorize('green', ' Built-in plugins'))\n\n\n@event('options.register')\ndef register_parser_arguments():\n parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',\n parents=[table_parser])\n parser.add_argument('--interface', help='Show plugins belonging to this interface')\n parser.add_argument('--phase', help='Show plugins that act on this phase')\n parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')\n", "path": "flexget/plugins/cli/plugins.py"}]}
| 966 | 325 |
gh_patches_debug_5568 | rasdani/github-patches | git_diff | pyodide__pyodide-689 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo?
Wondering what "wv" means or if this should read "more complex things."
https://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide_build/mkpkg.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import argparse
4 import json
5 import os
6 import shutil
7 import urllib.request
8 from pathlib import Path
9
10 PACKAGES_ROOT = Path(__file__).parent.parent / 'packages'
11
12 SDIST_EXTENSIONS = []
13
14
15 def get_sdist_extensions():
16 if SDIST_EXTENSIONS:
17 return SDIST_EXTENSIONS
18
19 for format in shutil.get_unpack_formats():
20 for ext in format[1]:
21 SDIST_EXTENSIONS.append(ext)
22
23 return SDIST_EXTENSIONS
24
25
26 def get_sdist_url_entry(json_content):
27 sdist_extensions_tuple = tuple(get_sdist_extensions())
28
29 for entry in json_content['urls']:
30 if entry['filename'].endswith(sdist_extensions_tuple):
31 return entry
32
33 raise Exception('No sdist URL found for package %s (%s)' % (
34 json_content['info'].get('name'),
35 json_content['info'].get('package_url'),
36 ))
37
38
39 def make_package(package, version=None):
40 import yaml
41
42 version = ('/' + version) if version is not None else ''
43 url = f"https://pypi.org/pypi/{package}{version}/json"
44
45 with urllib.request.urlopen(url) as fd:
46 json_content = json.load(fd)
47
48 entry = get_sdist_url_entry(json_content)
49 download_url = entry['url']
50 sha256 = entry['digests']['sha256']
51 version = json_content['info']['version']
52
53 yaml_content = {
54 'package': {
55 'name': package,
56 'version': version
57 },
58 'source': {
59 'url': download_url,
60 'sha256': sha256
61 },
62 'test': {
63 'imports': [
64 package
65 ]
66 }
67 }
68
69 if not (PACKAGES_ROOT / package).is_dir():
70 os.makedirs(PACKAGES_ROOT / package)
71 with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:
72 yaml.dump(yaml_content, fd, default_flow_style=False)
73
74
75 def make_parser(parser):
76 parser.description = '''
77 Make a new pyodide package. Creates a simple template that will work
78 for most pure Python packages, but will have to be edited for more wv
79 complex things.'''.strip()
80 parser.add_argument(
81 'package', type=str, nargs=1,
82 help="The package name on PyPI")
83 parser.add_argument(
84 '--version', type=str, default=None,
85 help="Package version string, "
86 "e.g. v1.2.1 (defaults to latest stable release)")
87 return parser
88
89
90 def main(args):
91 package = args.package[0]
92 make_package(package, args.version)
93
94
95 if __name__ == '__main__':
96 parser = make_parser(argparse.ArgumentParser())
97 args = parser.parse_args()
98 main(args)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py
--- a/pyodide_build/mkpkg.py
+++ b/pyodide_build/mkpkg.py
@@ -75,7 +75,7 @@
def make_parser(parser):
parser.description = '''
Make a new pyodide package. Creates a simple template that will work
-for most pure Python packages, but will have to be edited for more wv
+for most pure Python packages, but will have to be edited for more
complex things.'''.strip()
parser.add_argument(
'package', type=str, nargs=1,
|
{"golden_diff": "diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py\n--- a/pyodide_build/mkpkg.py\n+++ b/pyodide_build/mkpkg.py\n@@ -75,7 +75,7 @@\n def make_parser(parser):\n parser.description = '''\n Make a new pyodide package. Creates a simple template that will work\n-for most pure Python packages, but will have to be edited for more wv\n+for most pure Python packages, but will have to be edited for more\n complex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n", "issue": "Typo?\nWondering what \"wv\" means or if this should read \"more complex things.\"\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more wv\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. 
v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}]}
| 1,145 | 137 |
gh_patches_debug_30846
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-1877
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tmobile_us is broken
During the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tmobile_us.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 from urllib.parse import urlencode
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Monday': 'Mo',
11 'Tuesday': 'Tu',
12 'Wednesday': 'We',
13 'Thursday': 'Th',
14 'Friday': 'Fr',
15 'Saturday': 'Sa',
16 'Sunday': 'Su'}
17
18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
19
20
21 class TMobileUSSpider(scrapy.Spider):
22 name = "tmobile_us"
23 item_attributes = { 'brand': "T-Mobile" }
24 allowed_domains = ["www.t-mobile.com"]
25 download_delay = 0.2
26
27 def parse_hours(self, store_hours):
28 opening_hours = OpeningHours()
29 if store_hours is None:
30 return
31
32 for store_day in store_hours:
33 day = DAY_MAPPING[store_day.get("day")]
34 open_time = store_day.get("opens")
35 close_time = store_day.get("closes")
36 if open_time is None and close_time is None:
37 continue
38 opening_hours.add_range(day=day,
39 open_time=open_time,
40 close_time=close_time,
41 time_format='%H:%M'
42 )
43
44 return opening_hours.as_opening_hours()
45
46 def start_requests(self):
47 url = BASE_URL
48
49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
50
51 next(points) # Ignore the header
52 for point in points:
53 _, lat, lon = point.strip().split(',')
54
55 params = {
56 'latitude': '{}'.format(lat),
57 'longitude': '{}'.format(lon),
58 'count': '1000',
59 'radius': '25',
60 'ignoreLoadingBar': 'false'
61 }
62
63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse)
64
65 def parse(self, response):
66 data = json.loads(response.body_as_unicode())
67
68 for store in data:
69 properties = {
70 'name': store["name"],
71 'ref': store["id"],
72 'addr_full': store["location"]["address"]["streetAddress"],
73 'city': store["location"]["address"]["addressLocality"],
74 'state': store["location"]["address"]["addressRegion"],
75 'postcode': store["location"]["address"]["postalCode"],
76 'phone': store.get("telephone"),
77 'website': store.get("url") or response.url,
78 'lat': float(store["location"]["latitude"]),
79 'lon': float(store["location"]["longitude"]),
80 }
81
82 hours = self.parse_hours(store.get("hours", []))
83 if hours:
84 properties["opening_hours"] = hours
85
86 yield GeojsonPointItem(**properties)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py
--- a/locations/spiders/tmobile_us.py
+++ b/locations/spiders/tmobile_us.py
@@ -15,12 +15,12 @@
'Saturday': 'Sa',
'Sunday': 'Su'}
-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'
class TMobileUSSpider(scrapy.Spider):
name = "tmobile_us"
- item_attributes = { 'brand': "T-Mobile" }
+ item_attributes = {'brand': "T-Mobile"}
allowed_domains = ["www.t-mobile.com"]
download_delay = 0.2
@@ -67,7 +67,7 @@
for store in data:
properties = {
- 'name': store["name"],
+ 'name': store.get("name"),
'ref': store["id"],
'addr_full': store["location"]["address"]["streetAddress"],
'city': store["location"]["address"]["addressLocality"],
@@ -75,8 +75,8 @@
'postcode': store["location"]["address"]["postalCode"],
'phone': store.get("telephone"),
'website': store.get("url") or response.url,
- 'lat': float(store["location"]["latitude"]),
- 'lon': float(store["location"]["longitude"]),
+ 'lat': store["location"]["latitude"],
+ 'lon': store["location"]["longitude"],
}
hours = self.parse_hours(store.get("hours", []))
|
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,12 +15,12 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n- item_attributes = { 'brand': \"T-Mobile\" }\n+ item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n \n@@ -67,7 +67,7 @@\n \n for store in data:\n properties = {\n- 'name': store[\"name\"],\n+ 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n@@ -75,8 +75,8 @@\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n- 'lat': float(store[\"location\"][\"latitude\"]),\n- 'lon': float(store[\"location\"][\"longitude\"]),\n+ 'lat': store[\"location\"][\"latitude\"],\n+ 'lon': store[\"location\"][\"longitude\"],\n }\n \n hours = self.parse_hours(store.get(\"hours\", []))\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n 
properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': store[\"location\"][\"latitude\"],\n 'lon': store[\"location\"][\"longitude\"],\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
| 1,227 | 392 |
gh_patches_debug_54784
|
rasdani/github-patches
|
git_diff
|
encode__httpx-589
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module 'rfc3986.api' has no attribute 'iri_reference'
Just did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title.
(python37) $ pip install httpx
Collecting httpx
Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl
Requirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16)
Requirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0)
Requirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4)
Requirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1)
Requirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0)
Requirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8)
Collecting hstspreload>=2019.8.27 (from httpx)
Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB)
|████████████████████████████████| 706kB 740kB/s
Requirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0)
Requirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0)
Building wheels for collected packages: hstspreload
Building wheel for hstspreload (setup.py) ... done
Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b
Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033
Successfully built hstspreload
Installing collected packages: hstspreload, httpx
Successfully installed hstspreload-2019.11.4 httpx-0.7.6
Manually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 include_package_data=True,
52 zip_safe=False,
53 install_requires=[
54 "certifi",
55 "hstspreload",
56 "chardet==3.*",
57 "h11==0.8.*",
58 "h2==3.*",
59 "idna==2.*",
60 "rfc3986==1.*",
61 "sniffio==1.*",
62 ],
63 classifiers=[
64 "Development Status :: 3 - Alpha",
65 "Environment :: Web Environment",
66 "Intended Audience :: Developers",
67 "License :: OSI Approved :: BSD License",
68 "Operating System :: OS Independent",
69 "Topic :: Internet :: WWW/HTTP",
70 "Framework :: AsyncIO",
71 "Framework :: Trio",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.6",
74 "Programming Language :: Python :: 3.7",
75 "Programming Language :: Python :: 3.8",
76 ],
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
"h11==0.8.*",
"h2==3.*",
"idna==2.*",
- "rfc3986==1.*",
+ "rfc3986>=1.3,<2",
"sniffio==1.*",
],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n- \"rfc3986==1.*\",\n+ \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n", "issue": "AttributeError: module 'rfc3986.api' has no attribute 'iri_reference'\nJust did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title.\r\n\r\n\r\n(python37) $ pip install httpx\r\nCollecting httpx\r\n Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl\r\nRequirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16)\r\nRequirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0)\r\nRequirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4)\r\nRequirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1)\r\nRequirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0)\r\nRequirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8)\r\nCollecting hstspreload>=2019.8.27 (from httpx)\r\n Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 706kB 740kB/s \r\nRequirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0)\r\nRequirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0)\r\nBuilding wheels for collected packages: hstspreload\r\n Building wheel for hstspreload (setup.py) ... done\r\n Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b\r\n Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033\r\nSuccessfully built hstspreload\r\nInstalling collected packages: hstspreload, httpx\r\nSuccessfully installed hstspreload-2019.11.4 httpx-0.7.6\r\n\r\nManually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater? 
\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n 
\"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
| 1,814 | 102 |
gh_patches_debug_44285
|
rasdani/github-patches
|
git_diff
|
lisa-lab__pylearn2-579
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update default training algorithm to support multiple monitoring datasets
Just follow SGD or BGD for an example
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylearn2/training_algorithms/default.py`
Content:
```
1 """
2 .. todo::
3
4 WRITEME
5 """
6 from pylearn2.monitor import Monitor
7 from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
8 from pylearn2.utils import safe_zip
9 from pylearn2.utils.data_specs import DataSpecsMapping
10 import theano.tensor as T
11
12
13 class DefaultTrainingAlgorithm(TrainingAlgorithm):
14 """
15 .. todo::
16
17 WRITEME
18 """
19 def __init__(self, batch_size=None, batches_per_iter=1000,
20 monitoring_batches=-1, monitoring_dataset=None,
21 termination_criterion=None):
22 """
23 Parameters
24 ----------
25 batch_size : int
26 If batch_size is None, reverts to the `force_batch_size` field of \
27 the model
28 batches_per_iter : int
29 WRITEME
30 monitoring_batches : int
31 WRITEME
32 monitoring_dataset : WRITEME
33 termination_criterion : WRITEME
34 If specified, can cause the algorithm to terminate before \
35 `model.learn_batch` says to
36 """
37 self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
38 if monitoring_dataset is None:
39 assert monitoring_batches == -1
40 self.monitoring_dataset = monitoring_dataset
41 self.monitoring_batches = monitoring_batches
42 self.bSetup = False
43 self.termination_criterion = termination_criterion
44
45 def setup(self, model, dataset):
46 """
47 Allows the training algorithm to do some preliminary configuration
48 *before* we actually start training the model. The dataset is provided
49 in case other derived training algorithms need to modify model based on
50 the dataset.
51
52 Parameters
53 ----------
54 model : object
55 Python object representing the model to train loosely \
56 implementing the interface of models.model.Model.
57
58 dataset : pylearn2.datasets.dataset.Dataset
59 Dataset object used to draw training data
60 """
61 self.model = model
62
63 self.monitor = Monitor.get_monitor(model)
64
65 if self.monitoring_dataset is not None:
66 # Get the data specifications needed by the model
67 space, source = model.get_monitoring_data_specs()
68
69 # Create Theano variables for each of the individual components
70 # of that data. Usually, it will be X for inputs and Y for targets.
71 # First, we need to find these components, and put them in a tuple
72 mapping = DataSpecsMapping((space, source))
73 space_tuple = mapping.flatten(space, return_tuple=True)
74 source_tuple = mapping.flatten(source, return_tuple=True)
75 # Then, build a flat tuple of these Theano variables
76 ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
77 for (sp, src) in safe_zip(space_tuple, source_tuple))
78 # Finally, organize them back into a structure expected by the
79 # monitoring channels of the model
80 nested_ipt = mapping.nest(ipt)
81
82 self.monitor.add_dataset(dataset=self.monitoring_dataset,
83 mode="sequential",
84 batch_size=self.batch_size,
85 num_batches=self.monitoring_batches)
86
87 channels = model.get_monitoring_channels(nested_ipt)
88 if not isinstance(channels, dict):
89 raise TypeError("model.get_monitoring_channels must return a "
90 "dictionary, but it returned " + str(channels))
91 for name in channels:
92 J = channels[name]
93 if isinstance(J, tuple):
94 assert len(J) == 2
95 J, prereqs = J
96 else:
97 prereqs = None
98
99 self.monitor.add_channel(name=name,
100 ipt=nested_ipt,
101 val=J,
102 prereqs=prereqs,
103 data_specs=(space, source))
104 self.first = True
105 self.bSetup = True
106
107 def train(self, dataset):
108 """
109 .. todo::
110
111 WRITEME
112 """
113 assert self.bSetup
114 model = self.model
115 if self.batch_size is None:
116 batch_size = model.force_batch_size
117 else:
118 batch_size = self.batch_size
119 if hasattr(model, 'force_batch_size'):
120 assert (model.force_batch_size <= 0 or batch_size ==
121 model.force_batch_size)
122
123 for i in xrange(self.batches_per_iter):
124 # model.train_batch and self.train both return False when training
125 # should terminate.
126 learn_more = model.train_batch(dataset, batch_size)
127 model.monitor.report_batch(batch_size)
128 if not learn_more:
129 break
130
131 # Make sure we didn't exit training loop because Model.learn
132 # hasn't been updated to new interface yet.
133 if learn_more not in [True,False]:
134 msg = ('The learn method of model %s did not return a boolean ' +
135 'value. Please update your model accordingly.')
136 raise ValueError(msg % str(model))
137 self.learn_more = learn_more
138
139 def continue_learning(self, model):
140 """
141 .. todo::
142
143 WRITEME
144 """
145 if self.learn_more:
146 if self.termination_criterion is not None:
147 return self.termination_criterion.continue_learning(model)
148 return True
149 return False
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py
--- a/pylearn2/training_algorithms/default.py
+++ b/pylearn2/training_algorithms/default.py
@@ -29,7 +29,8 @@
WRITEME
monitoring_batches : int
WRITEME
- monitoring_dataset : WRITEME
+ monitoring_dataset: Dataset or dict
+ A Dataset or a dictionary mapping string dataset names to Datasets
termination_criterion : WRITEME
If specified, can cause the algorithm to terminate before \
`model.learn_batch` says to
@@ -37,7 +38,8 @@
self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
if monitoring_dataset is None:
assert monitoring_batches == -1
- self.monitoring_dataset = monitoring_dataset
+
+ self._set_monitoring_dataset(monitoring_dataset)
self.monitoring_batches = monitoring_batches
self.bSetup = False
self.termination_criterion = termination_criterion
@@ -74,33 +76,42 @@
source_tuple = mapping.flatten(source, return_tuple=True)
# Then, build a flat tuple of these Theano variables
ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
- for (sp, src) in safe_zip(space_tuple, source_tuple))
+ for (sp, src) in safe_zip(space_tuple, source_tuple))
# Finally, organize them back into a structure expected by the
# monitoring channels of the model
nested_ipt = mapping.nest(ipt)
- self.monitor.add_dataset(dataset=self.monitoring_dataset,
- mode="sequential",
- batch_size=self.batch_size,
- num_batches=self.monitoring_batches)
-
channels = model.get_monitoring_channels(nested_ipt)
if not isinstance(channels, dict):
raise TypeError("model.get_monitoring_channels must return a "
"dictionary, but it returned " + str(channels))
- for name in channels:
- J = channels[name]
- if isinstance(J, tuple):
- assert len(J) == 2
- J, prereqs = J
+
+ for dataset_name in self.monitoring_dataset:
+ if dataset_name == '':
+ prefix = ''
else:
- prereqs = None
+ prefix = dataset_name + '_'
+ monitoring_dataset = self.monitoring_dataset[dataset_name]
+
+ self.monitor.add_dataset(dataset=monitoring_dataset,
+ mode="sequential",
+ batch_size=self.batch_size)
+
+ for name in channels:
+ J = channels[name]
+ if isinstance(J, tuple):
+ assert len(J) == 2
+ J, prereqs = J
+ else:
+ prereqs = None
+
+ self.monitor.add_channel(name=prefix + name,
+ ipt=nested_ipt,
+ val=J,
+ dataset=monitoring_dataset,
+ prereqs=prereqs,
+ data_specs=(space, source))
- self.monitor.add_channel(name=name,
- ipt=nested_ipt,
- val=J,
- prereqs=prereqs,
- data_specs=(space, source))
self.first = True
self.bSetup = True
@@ -130,7 +141,7 @@
# Make sure we didn't exit training loop because Model.learn
# hasn't been updated to new interface yet.
- if learn_more not in [True,False]:
+ if learn_more not in [True, False]:
msg = ('The learn method of model %s did not return a boolean ' +
'value. Please update your model accordingly.')
raise ValueError(msg % str(model))
|
{"golden_diff": "diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py\n--- a/pylearn2/training_algorithms/default.py\n+++ b/pylearn2/training_algorithms/default.py\n@@ -29,7 +29,8 @@\n WRITEME\n monitoring_batches : int\n WRITEME\n- monitoring_dataset : WRITEME\n+ monitoring_dataset: Dataset or dict\n+ A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n@@ -37,7 +38,8 @@\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n- self.monitoring_dataset = monitoring_dataset\n+\n+ self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n@@ -74,33 +76,42 @@\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n- for (sp, src) in safe_zip(space_tuple, source_tuple))\n+ for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n \n- self.monitor.add_dataset(dataset=self.monitoring_dataset,\n- mode=\"sequential\",\n- batch_size=self.batch_size,\n- num_batches=self.monitoring_batches)\n-\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n- for name in channels:\n- J = channels[name]\n- if isinstance(J, tuple):\n- assert len(J) == 2\n- J, prereqs = J\n+\n+ for dataset_name in self.monitoring_dataset:\n+ if dataset_name == '':\n+ prefix = ''\n else:\n- prereqs = None\n+ prefix = dataset_name + '_'\n+ monitoring_dataset = self.monitoring_dataset[dataset_name]\n+\n+ self.monitor.add_dataset(dataset=monitoring_dataset,\n+ mode=\"sequential\",\n+ batch_size=self.batch_size)\n+\n+ for name in channels:\n+ J = channels[name]\n+ if isinstance(J, tuple):\n+ assert len(J) == 2\n+ J, prereqs = J\n+ else:\n+ prereqs = None\n+\n+ self.monitor.add_channel(name=prefix + name,\n+ ipt=nested_ipt,\n+ val=J,\n+ dataset=monitoring_dataset,\n+ prereqs=prereqs,\n+ data_specs=(space, source))\n \n- self.monitor.add_channel(name=name,\n- ipt=nested_ipt,\n- val=J,\n- prereqs=prereqs,\n- data_specs=(space, source))\n self.first = True\n self.bSetup = True\n \n@@ -130,7 +141,7 @@\n \n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n- if learn_more not in [True,False]:\n+ if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n", "issue": "Update default training algorithm to support multiple monitoring datasets\nJust follow SGD or BGD for an example\n\n", "before_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset : WRITEME\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n self.monitoring_dataset = monitoring_dataset\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n self.monitor.add_dataset(dataset=self.monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size,\n num_batches=self.monitoring_batches)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=name,\n ipt=nested_ipt,\n val=J,\n prereqs=prereqs,\n data_specs=(space, source))\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True,False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}], "after_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset: Dataset or dict\n A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n\n self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. 
Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n\n for dataset_name in self.monitoring_dataset:\n if dataset_name == '':\n prefix = ''\n else:\n prefix = dataset_name + '_'\n monitoring_dataset = self.monitoring_dataset[dataset_name]\n\n self.monitor.add_dataset(dataset=monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size)\n\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=prefix + name,\n ipt=nested_ipt,\n val=J,\n dataset=monitoring_dataset,\n prereqs=prereqs,\n data_specs=(space, source))\n\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}]}
| 1,714 | 833 |
gh_patches_debug_34098
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-646
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow passing a Config object to the MMDetection models
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
In the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path
**Describe the solution you'd like**
I think it would be better to pass the MMDetection config object instead to the model method. By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this:
```
cfg = Config.fromfile(cfg_filepath)
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2
```
**Additional context**
There is a discussion in our Discord forum around this issue:
https://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/mmdet/common/bbox/two_stage/model.py`
Content:
```
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
```
Path: `icevision/models/mmdet/common/bbox/single_stage/model.py`
Content:
```
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
```
Path: `icevision/models/mmdet/common/mask/two_stage/model.py`
Content:
```
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 cfg.model.roi_head.mask_head.num_classes = num_classes - 1
17 if weights_path is not None:
18 cfg.model.pretrained = None
19
20 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
21
22 if weights_path is not None:
23 load_checkpoint(_model, str(weights_path))
24
25 return _model
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py
--- a/icevision/models/mmdet/common/bbox/single_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py
--- a/icevision/models/mmdet/common/bbox/two_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py
--- a/icevision/models/mmdet/common/mask/two_stage/model.py
+++ b/icevision/models/mmdet/common/mask/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
cfg.model.roi_head.mask_head.num_classes = num_classes - 1
if weights_path is not None:
|
{"golden_diff": "diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/single_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/two_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py\n--- a/icevision/models/mmdet/common/mask/two_stage/model.py\n+++ b/icevision/models/mmdet/common/mask/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n", "issue": "Allow passing a Config object to the MMDetection models\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nIn the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path\r\n\r\n\r\n**Describe the solution you'd like**\r\n I think it would be better to pass the MMDetection config object instead to the model method. 
By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this: \r\n\r\n```\r\ncfg = Config.fromfile(cfg_filepath)\r\ncfg.model.bbox_head.loss_cls.loss_weight = 0.8\r\ncfg.model.bbox_head.loss_bbox.loss_weight = 2\r\n```\r\n\r\n\r\n**Additional context**\r\nThere is a discussion in our Discord forum around this issue:\r\nhttps://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}], "after_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n 
load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}]}
| 1,222 | 716 |
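The icevision patch recorded in this entry lets `model()` accept either a config file path or an existing `mmcv` `Config` object. For reference, a minimal sketch of the workflow the issue asks for: build a `Config` in memory, adjust the loss-weight fields named in the issue, and pass the object on without writing a modified `.py` file. The tiny inline config dict and the `as_config` helper are illustrative stand-ins, not part of the icevision API; only the `isinstance`/`Config.fromfile` branch mirrors the patched code.

```python
from pathlib import Path
from mmcv import Config

def as_config(cfg):
    """Illustrative helper mirroring the patched branch: accept a path or a Config."""
    if isinstance(cfg, (str, Path)):
        cfg = Config.fromfile(str(cfg))
    return cfg

# Build a toy Config in memory and tweak the fields mentioned in the issue.
cfg = Config(dict(model=dict(bbox_head=dict(loss_cls=dict(loss_weight=1.0),
                                            loss_bbox=dict(loss_weight=1.0)))))
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2

assert as_config(cfg) is cfg  # an existing Config object is passed through unchanged
```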
gh_patches_debug_4636
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-885
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: cannot import name 'marker_trait' from 'chaco.api'
**Problem Description**
ImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo
**Reproduction Steps:**
```python
python chaco/examples/tutorials/scipy2008/traits_example.py
```
**Expected behavior:**
test passes
**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/tutorials/scipy2008/traits_example.py`
Content:
```
1 from numpy import linspace, sin
2
3 from enable.api import ColorTrait
4 from chaco.api import ArrayPlotData, Plot, marker_trait
5 from enable.api import ComponentEditor
6 from traits.api import HasTraits, Instance, Int
7 from traitsui.api import Group, Item, View
8
9
10 class ScatterPlotTraits(HasTraits):
11
12 plot = Instance(Plot)
13 color = ColorTrait("blue")
14 marker = marker_trait
15 marker_size = Int(4)
16
17 traits_view = View(
18 Group(
19 Item("color", label="Color", style="custom"),
20 Item("marker", label="Marker"),
21 Item("marker_size", label="Size"),
22 Item("plot", editor=ComponentEditor(), show_label=False),
23 orientation="vertical",
24 ),
25 width=800,
26 height=600,
27 resizable=True,
28 title="Chaco Plot",
29 )
30
31 def __init__(self):
32 # Create the data and the PlotData object
33 x = linspace(-14, 14, 100)
34 y = sin(x) * x ** 3
35 plotdata = ArrayPlotData(x=x, y=y)
36 # Create a Plot and associate it with the PlotData
37 plot = Plot(plotdata)
38 # Create a line plot in the Plot
39 self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0]
40 self.plot = plot
41
42 def _color_changed(self):
43 self.renderer.color = self.color
44
45 def _marker_changed(self):
46 self.renderer.marker = self.marker
47
48 def _marker_size_changed(self):
49 self.renderer.marker_size = self.marker_size
50
51
52 # ===============================================================================
53 # demo object that is used by the demo.py application.
54 # ===============================================================================
55 demo = ScatterPlotTraits()
56
57 if __name__ == "__main__":
58 demo.configure_traits()
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py
--- a/examples/tutorials/scipy2008/traits_example.py
+++ b/examples/tutorials/scipy2008/traits_example.py
@@ -1,7 +1,7 @@
from numpy import linspace, sin
-from enable.api import ColorTrait
-from chaco.api import ArrayPlotData, Plot, marker_trait
+from enable.api import ColorTrait, marker_trait
+from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Group, Item, View
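The fix above only relocates the import, so a quick way to confirm it is a small traits class that pulls `marker_trait` from `enable.api`. This is a sketch assuming a recent enable and traits install; the `MarkerStyle` class is a made-up stand-in for the demo's style traits.

```python
# marker_trait now comes from enable.api; chaco.api no longer re-exports it.
from enable.api import ColorTrait, marker_trait
from traits.api import HasTraits, Int

class MarkerStyle(HasTraits):
    """Made-up holder for the demo's style traits, using the relocated trait."""
    color = ColorTrait("blue")
    marker = marker_trait   # mapped trait of marker-name strings
    marker_size = Int(4)

style = MarkerStyle()
print(style.marker, style.marker_size)
```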
|
{"golden_diff": "diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py\n--- a/examples/tutorials/scipy2008/traits_example.py\n+++ b/examples/tutorials/scipy2008/traits_example.py\n@@ -1,7 +1,7 @@\n from numpy import linspace, sin\n \n-from enable.api import ColorTrait\n-from chaco.api import ArrayPlotData, Plot, marker_trait\n+from enable.api import ColorTrait, marker_trait\n+from chaco.api import ArrayPlotData, Plot\n from enable.api import ComponentEditor\n from traits.api import HasTraits, Instance, Int\n from traitsui.api import Group, Item, View\n", "issue": "ImportError: cannot import name 'marker_trait' from 'chaco.api'\n**Problem Description**\nImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo\n\n**Reproduction Steps:**\n\n```python\npython chaco/examples/tutorials/scipy2008/traits_example.py\n```\n\n**Expected behavior:**\ntest past\n\n**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]\n\n", "before_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait\nfrom chaco.api import ArrayPlotData, Plot, marker_trait\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def __init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}], "after_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait, marker_trait\nfrom chaco.api import ArrayPlotData, Plot\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def 
__init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}]}
| 885 | 154 |
gh_patches_debug_19957
|
rasdani/github-patches
|
git_diff
|
huggingface__accelerate-43
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`
Currently the `AcceleratedOptimizer` class doesn't support the `set_to_none` argument of `zero_grad`; is this an intentional exclusion?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/optimizer.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import torch
16
17 from .state import AcceleratorState, DistributedType, is_tpu_available
18
19
20 if is_tpu_available():
21 import torch_xla.core.xla_model as xm
22
23
24 def move_to_device(state, device):
25 if isinstance(state, (list, tuple)):
26 return type(state)(move_to_device(t, device) for t in state)
27 elif isinstance(state, dict):
28 return type(state)({k: move_to_device(v, device) for k, v in state.items()})
29 elif isinstance(state, torch.Tensor):
30 return state.to(device)
31 return state
32
33
34 class AcceleratedOptimizer(torch.optim.Optimizer):
35 """
36 Internal wrapper around a torch optimizer.
37
38 Args:
39 optimizer (:obj:`torch.optim.optimizer.Optimizer`):
40 The optimizer to wrap.
41 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):
42 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
43 :obj:`optimizer` on the right device.
44 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):
45 The scaler to use in the step function if training with mixed precision.
46 """
47
48 def __init__(self, optimizer, device_placement=True, scaler=None):
49 self.optimizer = optimizer
50 self.scaler = scaler
51 self.state = AcceleratorState()
52
53 # Handle device placement
54 if device_placement:
55 state_dict = self.optimizer.state_dict()
56 if self.state.distributed_type == DistributedType.TPU:
57 xm.send_cpu_data_to_device(state_dict, self.state.device)
58 else:
59 state_dict = move_to_device(state_dict, self.state.device)
60 self.optimizer.load_state_dict(state_dict)
61
62 @property
63 def param_groups(self):
64 return self.optimizer.param_groups
65
66 @param_groups.setter
67 def param_groups(self, param_groups):
68 self.optimizer.param_groups = param_groups
69
70 @property
71 def defaults(self):
72 return self.optimizer.defaults
73
74 @defaults.setter
75 def defaults(self, defaults):
76 self.optimizer.defaults = defaults
77
78 def add_param_group(self, param_group):
79 self.optimizer.add_param_group(param_group)
80
81 def load_state_dict(self, state_dict):
82 if self.state.distributed_type == DistributedType.TPU and self.device_placement:
83 xm.send_cpu_data_to_device(state_dict, self.state.device)
84 self.optimizer.load_state_dict(state_dict)
85
86 def state_dict(self):
87 return self.optimizer.state_dict()
88
89 def zero_grad(self):
90 self.optimizer.zero_grad()
91
92 def step(self):
93 if self.state.distributed_type == DistributedType.TPU:
94 xm.optimizer_step(self.optimizer)
95 elif self.scaler is not None:
96 self.scaler.step(self.optimizer)
97 self.scaler.update()
98 else:
99 self.optimizer.step()
100
101 def _switch_parameters(self, parameters_map):
102 for param_group in self.optimizer.param_groups:
103 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -14,6 +14,8 @@
import torch
+from packaging import version
+
from .state import AcceleratorState, DistributedType, is_tpu_available
@@ -86,8 +88,18 @@
def state_dict(self):
return self.optimizer.state_dict()
- def zero_grad(self):
- self.optimizer.zero_grad()
+ def zero_grad(self, set_to_none=None):
+ if version.parse(torch.__version__) < version.parse("1.7.0"):
+ if set_to_none is not None:
+ raise ValueError(
+ "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
+ f"earlier versions (found version {torch.__version__})."
+ )
+ self.optimizer.zero_grad()
+ else:
+ if set_to_none is not None:
+ set_to_none = False
+ self.optimizer.zero_grad(set_to_none=set_to_none)
def step(self):
if self.state.distributed_type == DistributedType.TPU:
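As a usage-level sketch of what the patch allows (assuming `accelerate` and PyTorch >= 1.7 are installed; the one-layer model and SGD settings are placeholders), the wrapped optimizer can now be asked to zero gradients with `set_to_none` instead of raising a `TypeError`:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)                              # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)   # optimizer is now the wrapper

loss = model(torch.randn(8, 4)).sum()
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad(set_to_none=True)  # accepted by the wrapper instead of raising TypeError
```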
|
{"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -14,6 +14,8 @@\n \n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_tpu_available\n \n \n@@ -86,8 +88,18 @@\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n \n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n", "issue": "AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`\nCurrently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`, is this an intentional exclusion?\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}]}
| 1,292 | 280 |
gh_patches_debug_12535
|
rasdani/github-patches
|
git_diff
|
conda__conda-6918
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deprecate 'conda help' in favor of --help CLI flag
With the install on Windows 10:
    platform            : win-64    conda version     : 4.3.30
    conda is private    : False     conda-env version : 4.3.30
    conda-build version : 3.0.22    python version    : 3.6.3.final.0
    requests version    : 2.18.4    root environment  : C:\Anaconda3 (writable)
the command: `conda help install` (or other similar, say, conda help create, etc.) produces the message
`C:\Anaconda3\python.exe: can't open file 'C:\Anaconda3\Scripts\conda': [Errno 2] No such file or directory`
This syntax works under Linux (Ubuntu), but of course there is no plain "conda" file in the Windows Anaconda Scripts directory.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/main_help.py`
Content:
```
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 from __future__ import absolute_import, division, print_function, unicode_literals
7
8 import subprocess
9 import sys
10
11
12 def execute(args, parser):
13 if not args.command:
14 parser.print_help()
15 return
16 subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py
--- a/conda/cli/main_help.py
+++ b/conda/cli/main_help.py
@@ -5,7 +5,6 @@
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function, unicode_literals
-import subprocess
import sys
@@ -13,4 +12,7 @@
if not args.command:
parser.print_help()
return
- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
+ print("ERROR: The 'conda help' command is deprecated.\n"
+ "Instead use 'conda %s --help'." % args.command,
+ file=sys.stderr)
+ return 1
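A self-contained sketch of the replacement behaviour (the `print`/`return 1` branch above), runnable outside conda; the bare usage string stands in for `parser.print_help()`, and the function signature is only illustrative:

```python
import sys

def execute(command=None):
    """Illustrative mirror of conda.cli.main_help.execute after the patch."""
    if not command:
        print("usage: conda help [command]")  # stands in for parser.print_help()
        return 0
    print("ERROR: The 'conda help' command is deprecated.\n"
          "Instead use 'conda %s --help'." % command,
          file=sys.stderr)
    return 1

if __name__ == "__main__":
    sys.exit(execute(sys.argv[1] if len(sys.argv) > 1 else None))
```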
|
{"golden_diff": "diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py\n--- a/conda/cli/main_help.py\n+++ b/conda/cli/main_help.py\n@@ -5,7 +5,6 @@\n # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import subprocess\n import sys\n \n \n@@ -13,4 +12,7 @@\n if not args.command:\n parser.print_help()\n return\n- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n+ print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n+ \"Instead use 'conda %s --help'.\" % args.command,\n+ file=sys.stderr)\n+ return 1\n", "issue": "deprecate 'conda help' in favor of --help CLI flag\nWith the install on Windows 10:\r\n\r\n _platform : win-64 conda version : 4.3.30\r\n conda is private : False conda-env version : 4.3.30\r\n conda-build version : 3.0.22 python version : 3.6.3.final.0\r\n requests version : 2.18.4 root environment : C:\\Anaconda3 (writable)_\r\n\r\nthe command: `conda help install` (or other similar, say, conda help create, etc.) produces the message\r\n\r\n`C:\\Anaconda3\\python.exe: can't open file 'C:\\Anaconda3\\Scripts\\conda': [Errno 2] No such file or directory`\r\n\r\nThis syntax works under Linux (Ubuntu), but of course, there is no just \"conda\" file in Windows Anaconda Scripts.\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport subprocess\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n", "path": "conda/cli/main_help.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n \"Instead use 'conda %s --help'.\" % args.command,\n file=sys.stderr)\n return 1\n", "path": "conda/cli/main_help.py"}]}
| 632 | 184 |
gh_patches_debug_24251
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-7114
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When the -c option is used when starting NVDA, user configuration cannot be saved
Steps to reproduce:
- Open a command prompt and cd to the nvda.exe directory.
- start nvda with `nvda.exe -c "C:\myTemp"`
- open the nvda menu and select "save configuration"
An error is shown "Could not save configuration - probably read only file system".
Regression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/fileUtils.py`
Content:
```
1 #fileUtils.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2017 NV Access Limited, Bram Duvigneau
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6 import os
7 import ctypes
8 from contextlib import contextmanager
9 from tempfile import NamedTemporaryFile
10 from logHandler import log
11
12 #: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.
13 MOVEFILE_REPLACE_EXISTING = 1
14
15 @contextmanager
16 def FaultTolerantFile(name):
17 '''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the
18 file `name' when the context manager scope ends and the the context manager __exit__ is called. This
19 means writing out the complete file can be performed with less concern of corrupting the original file
20 if the process is interrupted by windows shutting down.
21
22 Usage:
23 with FaultTolerantFile("myFile.txt") as f:
24 f.write("This is a test")
25
26 This creates a temporary file, and the writes actually happen on this temp file. At the end of the
27 `with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
28 '''
29 dirpath, filename = os.path.split(name)
30 with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
31 log.debug(f.name)
32 yield f
33 f.flush()
34 os.fsync(f)
35 f.close()
36 moveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)
37 if moveFileResult == 0:
38 raise ctypes.WinError()
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/fileUtils.py b/source/fileUtils.py
--- a/source/fileUtils.py
+++ b/source/fileUtils.py
@@ -18,6 +18,7 @@
file `name' when the context manager scope ends and the the context manager __exit__ is called. This
means writing out the complete file can be performed with less concern of corrupting the original file
if the process is interrupted by windows shutting down.
+ `name` must be unicode.
Usage:
with FaultTolerantFile("myFile.txt") as f:
@@ -26,6 +27,8 @@
This creates a temporary file, and the writes actually happen on this temp file. At the end of the
`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
'''
+ if not isinstance(name, unicode):
+ raise TypeError("name must be unicode")
dirpath, filename = os.path.split(name)
with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
log.debug(f.name)
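Because the patch makes `FaultTolerantFile` reject byte-string names, callers such as the `-c` handling need to decode ANSI paths first. A small sketch of that caller-side normalisation follows; the helper name and sample path are made up, and on the Python 2 runtime NVDA used here, `type(u"")` is `unicode`:

```python
import sys

def ensure_unicode_path(path):
    """Return `path` as text, decoding byte strings with the filesystem encoding."""
    if isinstance(path, bytes):
        return path.decode(sys.getfilesystemencoding() or "mbcs")
    return path

config_path = b"C:\\myTemp"                 # what an ANSI -c argument looks like
unicode_path = ensure_unicode_path(config_path)
assert isinstance(unicode_path, type(u""))  # satisfies the new isinstance(name, unicode) guard
```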
|
{"golden_diff": "diff --git a/source/fileUtils.py b/source/fileUtils.py\n--- a/source/fileUtils.py\n+++ b/source/fileUtils.py\n@@ -18,6 +18,7 @@\n \tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n \tmeans writing out the complete file can be performed with less concern of corrupting the original file\n \tif the process is interrupted by windows shutting down.\n+\t`name` must be unicode.\n \n \tUsage:\n \t\twith FaultTolerantFile(\"myFile.txt\") as f:\n@@ -26,6 +27,8 @@\n \tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n \t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n \t'''\n+\tif not isinstance(name, unicode):\n+\t\traise TypeError(\"name must be unicode\")\n \tdirpath, filename = os.path.split(name)\n \twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n \t\tlog.debug(f.name)\n", "issue": "When the -c option is used when starting NVDA user configuration can not be saved\nSteps to reproduce:\r\n\r\n- Open a command prompt and cd to the nvda.exe directory.\r\n- start nvda with `nvda.exe -c \"C:\\myTemp\"\r\n- open the nvda menu and select \"save configuration\"\r\n\r\nAn error is shown \"Could not save configuration - probably read only file system\".\r\n\r\nRegression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.\n", "before_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. 
At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}], "after_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\t`name` must be unicode.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tif not isinstance(name, unicode):\n\t\traise TypeError(\"name must be unicode\")\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}]}
| 845 | 253 |
gh_patches_debug_25184
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-6688
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Search related events by event ID

Right now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not by individual event ID. Searching by event ID is useful when trying to locate a specific event, and more so now that sampling has been disabled by default.
We should also add a column for event ID here.
Refs #3147
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/api/endpoints/group_events.py`
Content:
```
1 from __future__ import absolute_import
2
3 import six
4
5 from sentry import tagstore
6 from sentry.api.base import DocSection, EnvironmentMixin
7 from sentry.api.bases import GroupEndpoint
8 from sentry.api.serializers import serialize
9 from sentry.api.paginator import DateTimePaginator
10 from sentry.models import Environment, Event, Group
11 from sentry.search.utils import parse_query
12 from sentry.utils.apidocs import scenario, attach_scenarios
13 from rest_framework.response import Response
14 from sentry.search.utils import InvalidQuery
15
16
17 @scenario('ListAvailableSamples')
18 def list_available_samples_scenario(runner):
19 group = Group.objects.filter(project=runner.default_project).first()
20 runner.request(method='GET', path='/issues/%s/events/' % group.id)
21
22
23 class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
24 doc_section = DocSection.EVENTS
25
26 @attach_scenarios([list_available_samples_scenario])
27 def get(self, request, group):
28 """
29 List an Issue's Events
30 ``````````````````````
31
32 This endpoint lists an issue's events.
33
34 :pparam string issue_id: the ID of the issue to retrieve.
35 :auth: required
36 """
37
38 events = Event.objects.filter(
39 group_id=group.id,
40 )
41
42 query = request.GET.get('query')
43 if query:
44 try:
45 query_kwargs = parse_query(group.project, query, request.user)
46 except InvalidQuery as exc:
47 return Response({'detail': six.text_type(exc)}, status=400)
48
49 if query_kwargs['query']:
50 events = events.filter(
51 message__icontains=query_kwargs['query'],
52 )
53
54 if query_kwargs['tags']:
55 try:
56 environment_id = self._get_environment_id_from_request(
57 request, group.project.organization_id)
58 except Environment.DoesNotExist:
59 event_ids = []
60 else:
61 event_ids = tagstore.get_group_event_ids(
62 group.project_id, group.id, environment_id, query_kwargs['tags'])
63
64 if event_ids:
65 events = events.filter(
66 id__in=event_ids,
67 )
68 else:
69 events = events.none()
70
71 return self.paginate(
72 request=request,
73 queryset=events,
74 order_by='-datetime',
75 on_results=lambda x: serialize(x, request.user),
76 paginator_cls=DateTimePaginator,
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py
--- a/src/sentry/api/endpoints/group_events.py
+++ b/src/sentry/api/endpoints/group_events.py
@@ -12,6 +12,7 @@
from sentry.utils.apidocs import scenario, attach_scenarios
from rest_framework.response import Response
from sentry.search.utils import InvalidQuery
+from django.db.models import Q
@scenario('ListAvailableSamples')
@@ -40,6 +41,7 @@
)
query = request.GET.get('query')
+
if query:
try:
query_kwargs = parse_query(group.project, query, request.user)
@@ -47,9 +49,12 @@
return Response({'detail': six.text_type(exc)}, status=400)
if query_kwargs['query']:
- events = events.filter(
- message__icontains=query_kwargs['query'],
- )
+ q = Q(message__icontains=query_kwargs['query'])
+
+ if len(query) == 32:
+ q |= Q(event_id__exact=query_kwargs['query'])
+
+ events = events.filter(q)
if query_kwargs['tags']:
try:
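The heart of the change is the combined `Q` filter, which can be sketched on its own (assuming Django is installed; `build_event_filter` is a hypothetical helper, and the 32-character length check matches Sentry's hex event IDs):

```python
from django.db.models import Q

def build_event_filter(query):
    """Hypothetical helper mirroring the patched GroupEventsEndpoint logic."""
    q = Q(message__icontains=query)
    if len(query) == 32:             # Sentry event IDs are 32 hex characters
        q |= Q(event_id__exact=query)
    return q

# events = events.filter(build_event_filter(query))
print(build_event_filter("f" * 32))
```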
|
{"golden_diff": "diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py\n--- a/src/sentry/api/endpoints/group_events.py\n+++ b/src/sentry/api/endpoints/group_events.py\n@@ -12,6 +12,7 @@\n from sentry.utils.apidocs import scenario, attach_scenarios\n from rest_framework.response import Response\n from sentry.search.utils import InvalidQuery\n+from django.db.models import Q\n \n \n @scenario('ListAvailableSamples')\n@@ -40,6 +41,7 @@\n )\n \n query = request.GET.get('query')\n+\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n@@ -47,9 +49,12 @@\n return Response({'detail': six.text_type(exc)}, status=400)\n \n if query_kwargs['query']:\n- events = events.filter(\n- message__icontains=query_kwargs['query'],\n- )\n+ q = Q(message__icontains=query_kwargs['query'])\n+\n+ if len(query) == 32:\n+ q |= Q(event_id__exact=query_kwargs['query'])\n+\n+ events = events.filter(q)\n \n if query_kwargs['tags']:\n try:\n", "issue": "Search related events by event ID\n\r\n\r\n\r\nRight now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.\r\n\r\nWe should also add a column for event ID here.\r\n\r\nRefs #3147\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n events = events.filter(\n message__icontains=query_kwargs['query'],\n )\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n )\n", 
"path": "src/sentry/api/endpoints/group_events.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\nfrom django.db.models import Q\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n q = Q(message__icontains=query_kwargs['query'])\n\n if len(query) == 32:\n q |= Q(event_id__exact=query_kwargs['query'])\n\n events = events.filter(q)\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n )\n", "path": "src/sentry/api/endpoints/group_events.py"}]}
| 1,057 | 269 |
gh_patches_debug_43222
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-5067
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add a select for "subject areas"
**Describe the feature you'd like and what it will do**
Currently, the subject areas field is free text.
It should be a typeahead -- noninvasive.
@martinseul suggested that we should populate a list similar to
https://has.arizona.edu/research-focus-areas
Related to https://github.com/hydroshare/hydroshare/issues/4733
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theme/migrations/0023_alter_userprofile_subject_areas.py`
Content:
```
1 # Generated by Django 3.2.15 on 2023-04-11 18:41
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 def migrate_csv_subject_areas(apps, schema_editor):
8 SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
9 UserProfile = apps.get_model('theme.UserProfile')
10 # Attempt to match existing SAs from profiles
11 profiles_with_sa = UserProfile.objects \
12 .exclude(subject_areas__isnull=True) \
13 .exclude(subject_areas='')
14
15 subject_area_objects = SubjectArea.objects.all()
16
17 for profile in profiles_with_sa:
18 old_subject_areas = profile.subject_areas.split(',')
19 old_subject_areas = [s for s in old_subject_areas]
20 print('*' * 100)
21 print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
22 new_subj_areas = []
23 for subject in old_subject_areas:
24 print(f"Searching for a match with '{subject}'")
25 match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
26 if match:
27 new_subj_areas.append(match[0].name)
28 if match[0].name == subject:
29 print(f'- Exact match with pre-existing subject area: {subject}')
30 else:
31 print(f'- Near match with pre-existing subject area: {subject}')
32 else:
33 if subject.strip() == subject:
34 print(f"- Unmatched subject area '{subject}' will remain unaltered")
35 new_subj_areas.append(subject)
36 else:
37 print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
38 new_subj_areas.append(subject.strip())
39
40 sas = ','.join(new_subj_areas)
41 print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
42 profile.subject_areas = f'{{{sas}}}'
43 profile.save()
44
45 profiles_without_sa = UserProfile.objects \
46 .filter(subject_areas='')
47 for profile in profiles_without_sa:
48 print(f'Updating {profile} from "" to {{}}')
49 profile.subject_areas = '{}'
50 profile.save()
51
52
53 class Migration(migrations.Migration):
54
55 dependencies = [
56 ('theme', '0022_alter_userprofile_subject_areas'),
57 ]
58
59 operations = [
60 migrations.RunSQL("UPDATE theme_userprofile set subject_areas = NULL "
61 "where theme_userprofile.subject_areas like '';"),
62 migrations.RunPython(migrate_csv_subject_areas),
63 migrations.AlterField(
64 model_name='userprofile',
65 name='subject_areas',
66 field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. "Water Management." Free text entry or select from the suggestions', null=True, size=None),
67 ),
68 ]
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py
--- a/theme/migrations/0023_alter_userprofile_subject_areas.py
+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py
@@ -2,12 +2,25 @@
import django.contrib.postgres.fields
from django.db import migrations, models
+from django.db.utils import DataError
+from django.core.management import call_command
+import re
def migrate_csv_subject_areas(apps, schema_editor):
+ def strip_for_dict(string=""):
+ # Check if the string consists of only spaces and braces
+ res, _ = re.subn('{|}', '', string)
+ if res.strip() == "":
+ return ""
+ # replace invalid braces and quotes
+ string = string.replace("{", "[").replace("}", "]").replace("\"", "\'")
+ return string.strip()
+ call_command('create_subject_areas_dict')
SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
UserProfile = apps.get_model('theme.UserProfile')
# Attempt to match existing SAs from profiles
+ errors = []
profiles_with_sa = UserProfile.objects \
.exclude(subject_areas__isnull=True) \
.exclude(subject_areas='')
@@ -21,8 +34,16 @@
print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
new_subj_areas = []
for subject in old_subject_areas:
+ if subject == '':
+ # There is a trailing comma that we need to remove
+ continue
+ stripped_subject = strip_for_dict(subject)
+ if stripped_subject == '':
+ # The subject contained only invalid chars
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ continue
print(f"Searching for a match with '{subject}'")
- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]
if match:
new_subj_areas.append(match[0].name)
if match[0].name == subject:
@@ -30,24 +51,33 @@
else:
print(f'- Near match with pre-existing subject area: {subject}')
else:
- if subject.strip() == subject:
+ if stripped_subject == subject:
print(f"- Unmatched subject area '{subject}' will remain unaltered")
new_subj_areas.append(subject)
else:
- print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
- new_subj_areas.append(subject.strip())
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ new_subj_areas.append(stripped_subject)
sas = ','.join(new_subj_areas)
- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'
+ print(message)
profile.subject_areas = f'{{{sas}}}'
+ try:
+ profile.save()
+ except DataError as e:
+ errors.append(f'Error saving profile: {e}' + message)
+
+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')
+ for profile in profiles_without_sa:
+ print(f'Updating {profile} from "" to {{}}')
+ profile.subject_areas = '{}'
profile.save()
- profiles_without_sa = UserProfile.objects \
- .filter(subject_areas='')
- for profile in profiles_without_sa:
- print(f'Updating {profile} from "" to {{}}')
- profile.subject_areas = '{}'
- profile.save()
+ print("Done updating Subject Areas.")
+ if errors:
+ print("Errors during update:")
+ for error in errors:
+ print(error)
class Migration(migrations.Migration):
|
{"golden_diff": "diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py\n--- a/theme/migrations/0023_alter_userprofile_subject_areas.py\n+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py\n@@ -2,12 +2,25 @@\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+from django.db.utils import DataError\n+from django.core.management import call_command\n+import re\n \n \n def migrate_csv_subject_areas(apps, schema_editor):\n+ def strip_for_dict(string=\"\"):\n+ # Check if the string consists of only spaces and braces\n+ res, _ = re.subn('{|}', '', string)\n+ if res.strip() == \"\":\n+ return \"\"\n+ # replace invalid braces and quotes\n+ string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n+ return string.strip()\n+ call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n+ errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n@@ -21,8 +34,16 @@\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n+ if subject == '':\n+ # There is a trailing comma that we need to remove\n+ continue\n+ stripped_subject = strip_for_dict(subject)\n+ if stripped_subject == '':\n+ # The subject contained only invalid chars\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ continue\n print(f\"Searching for a match with '{subject}'\")\n- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n@@ -30,24 +51,33 @@\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n- if subject.strip() == subject:\n+ if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n- print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n- new_subj_areas.append(subject.strip())\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ new_subj_areas.append(stripped_subject)\n \n sas = ','.join(new_subj_areas)\n- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n+ print(message)\n profile.subject_areas = f'{{{sas}}}'\n+ try:\n+ profile.save()\n+ except DataError as e:\n+ errors.append(f'Error saving profile: {e}' + message)\n+\n+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n+ for profile in profiles_without_sa:\n+ print(f'Updating {profile} from \"\" to {{}}')\n+ profile.subject_areas = '{}'\n profile.save()\n \n- profiles_without_sa = UserProfile.objects \\\n- .filter(subject_areas='')\n- for profile in profiles_without_sa:\n- print(f'Updating {profile} from \"\" to {{}}')\n- profile.subject_areas = '{}'\n- profile.save()\n+ print(\"Done updating Subject Areas.\")\n+ if errors:\n+ print(\"Errors during update:\")\n+ for error in errors:\n+ print(error)\n \n \n class 
Migration(migrations.Migration):\n", "issue": "add a select for \"subject areas\"\n**Describe the feature you'd like and what it will do**\r\nCurrently, subject areas is free text.\r\nShould be a typeahead -- noninvasive.\r\n@martinseul suggested that we should populate a list similar to\r\nhttps://has.arizona.edu/research-focus-areas\r\n\r\n\r\nRelated to https://github.com/hydroshare/hydroshare/issues/4733\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if subject.strip() == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n new_subj_areas.append(subject.strip())\n\n sas = ','.join(new_subj_areas)\n print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n profile.subject_areas = f'{{{sas}}}'\n profile.save()\n\n profiles_without_sa = UserProfile.objects \\\n .filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. 
\"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}], "after_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nfrom django.db.utils import DataError\nfrom django.core.management import call_command\nimport re\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n def strip_for_dict(string=\"\"):\n # Check if the string consists of only spaces and braces\n res, _ = re.subn('{|}', '', string)\n if res.strip() == \"\":\n return \"\"\n # replace invalid braces and quotes\n string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n return string.strip()\n call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n if subject == '':\n # There is a trailing comma that we need to remove\n continue\n stripped_subject = strip_for_dict(subject)\n if stripped_subject == '':\n # The subject contained only invalid chars\n print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n continue\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n new_subj_areas.append(stripped_subject)\n\n sas = ','.join(new_subj_areas)\n message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n print(message)\n profile.subject_areas = f'{{{sas}}}'\n try:\n profile.save()\n except DataError as e:\n errors.append(f'Error saving profile: {e}' + message)\n\n profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n print(\"Done updating Subject Areas.\")\n if errors:\n print(\"Errors during update:\")\n for error in errors:\n print(error)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n 
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. \"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}]}
| 1,107 | 897 |
gh_patches_debug_14475
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-2686
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ParserError exception raised for invalid configuration
### search you tried in the issue tracker
ParserError and unicode
### describe your issue
I executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:
````
$ pre-commit autoupdate
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
Check the log at /home/carsten/.cache/pre-commit/pre-commit.log
````
This is an expected error and I would expect an error message like `Your configuration file "..." is wrongly formatted at <pos>. Please review the format of the content.'.
Thank you,
Carsten
### pre-commit --version
pre-commit 2.21.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-executables-have-shebangs
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.2
hooks:
- id: shellcheck
```
### ~/.cache/pre-commit/pre-commit.log (if present)
### version information
```
pre-commit version: 2.21.0
git --version: git version 2.35.3
sys.version:
3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]
sys.executable: /home/carsten/virtualenv/bin/python3.10
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
```
Traceback (most recent call last):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py", line 73, in error_handler
yield
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py", line 355, in main
return autoupdate(
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py", line 154, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 47, in migrate_config
contents = _migrate_map(contents)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 16, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py", line 81, in load
return loader.get_single_data()
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "yaml/_yaml.pyx", line 673, in yaml._yaml.CParser.get_single_node
File "yaml/_yaml.pyx", line 687, in yaml._yaml.CParser._compose_document
File "yaml/_yaml.pyx", line 731, in yaml._yaml.CParser._compose_node
File "yaml/_yaml.pyx", line 847, in yaml._yaml.CParser._compose_mapping_node
File "yaml/_yaml.pyx", line 860, in yaml._yaml.CParser._parse_next_event
yaml.parser.ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/migrate_config.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4 import textwrap
5
6 import yaml
7
8 from pre_commit.yaml import yaml_load
9
10
11 def _is_header_line(line: str) -> bool:
12 return line.startswith(('#', '---')) or not line.strip()
13
14
15 def _migrate_map(contents: str) -> str:
16 if isinstance(yaml_load(contents), list):
17 # Find the first non-header line
18 lines = contents.splitlines(True)
19 i = 0
20 # Only loop on non empty configuration file
21 while i < len(lines) and _is_header_line(lines[i]):
22 i += 1
23
24 header = ''.join(lines[:i])
25 rest = ''.join(lines[i:])
26
27 # If they are using the "default" flow style of yaml, this operation
28 # will yield a valid configuration
29 try:
30 trial_contents = f'{header}repos:\n{rest}'
31 yaml_load(trial_contents)
32 contents = trial_contents
33 except yaml.YAMLError:
34 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}'
35
36 return contents
37
38
39 def _migrate_sha_to_rev(contents: str) -> str:
40 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
41
42
43 def migrate_config(config_file: str, quiet: bool = False) -> int:
44 with open(config_file) as f:
45 orig_contents = contents = f.read()
46
47 contents = _migrate_map(contents)
48 contents = _migrate_sha_to_rev(contents)
49
50 if contents != orig_contents:
51 with open(config_file, 'w') as f:
52 f.write(contents)
53
54 print('Configuration has been migrated.')
55 elif not quiet:
56 print('Configuration is already migrated.')
57 return 0
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -3,8 +3,10 @@
import re
import textwrap
+import cfgv
import yaml
+from pre_commit.clientlib import InvalidConfigError
from pre_commit.yaml import yaml_load
@@ -44,6 +46,13 @@
with open(config_file) as f:
orig_contents = contents = f.read()
+ with cfgv.reraise_as(InvalidConfigError):
+ with cfgv.validate_context(f'File {config_file}'):
+ try:
+ yaml_load(orig_contents)
+ except Exception as e:
+ raise cfgv.ValidationError(str(e))
+
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
|
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -3,8 +3,10 @@\n import re\n import textwrap\n \n+import cfgv\n import yaml\n \n+from pre_commit.clientlib import InvalidConfigError\n from pre_commit.yaml import yaml_load\n \n \n@@ -44,6 +46,13 @@\n with open(config_file) as f:\n orig_contents = contents = f.read()\n \n+ with cfgv.reraise_as(InvalidConfigError):\n+ with cfgv.validate_context(f'File {config_file}'):\n+ try:\n+ yaml_load(orig_contents)\n+ except Exception as e:\n+ raise cfgv.ValidationError(str(e))\n+\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n", "issue": "ParserError exception raised for invalid configuration\n### search you tried in the issue tracker\r\n\r\nParserError and unicode\r\n\r\n### describe your issue\r\n\r\nI executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:\r\n\r\n````\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\nCheck the log at /home/carsten/.cache/pre-commit/pre-commit.log\r\n````\r\n\r\nThis is an expected error and I would expect an error message like `Your configuration file \"...\" is wrongly formatted at <pos>. Please review the format of the content.'.\r\n\r\nThank you,\r\nCarsten\r\n\r\n### pre-commit --version\r\n\r\npre-commit 2.21.0\r\n\r\n### .pre-commit-config.yaml\r\n\r\n```yaml\r\nrepos:\r\n - repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v4.4.0\r\n hooks:\r\n - id: check-executables-have-shebangs\r\n\r\n- repo: https://github.com/shellcheck-py/shellcheck-py\r\n rev: v0.9.0.2\r\n hooks:\r\n - id: shellcheck\r\n```\r\n\r\n\r\n### ~/.cache/pre-commit/pre-commit.log (if present)\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.21.0\r\ngit --version: git version 2.35.3\r\nsys.version:\r\n 3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]\r\nsys.executable: /home/carsten/virtualenv/bin/python3.10\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py\", line 73, in error_handler\r\n yield\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py\", line 355, in main\r\n return autoupdate(\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py\", line 154, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 47, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 16, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py\", line 81, in load\r\n return loader.get_single_data()\r\n File 
\"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"yaml/_yaml.pyx\", line 673, in yaml._yaml.CParser.get_single_node\r\n File \"yaml/_yaml.pyx\", line 687, in yaml._yaml.CParser._compose_document\r\n File \"yaml/_yaml.pyx\", line 731, in yaml._yaml.CParser._compose_node\r\n File \"yaml/_yaml.pyx\", line 847, in yaml._yaml.CParser._compose_mapping_node\r\n File \"yaml/_yaml.pyx\", line 860, in yaml._yaml.CParser._parse_next_event\r\nyaml.parser.ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport cfgv\nimport yaml\n\nfrom pre_commit.clientlib import InvalidConfigError\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n with cfgv.reraise_as(InvalidConfigError):\n with 
cfgv.validate_context(f'File {config_file}'):\n try:\n yaml_load(orig_contents)\n except Exception as e:\n raise cfgv.ValidationError(str(e))\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
| 1,703 | 199 |
gh_patches_debug_11999
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-249
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type check to LeakyReLU function
Related to #123
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/leaky_relu.py`
Content:
```
1 from chainer import cuda
2 from chainer import function
3
4
5 def _kern():
6 return cuda.elementwise(
7 'float* y, const float* cond, const float* x, float slope',
8 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')
9
10
11 class LeakyReLU(function.Function):
12
13 """Leaky rectifier unit."""
14
15 def __init__(self, slope=0.2):
16 self.slope = slope
17
18 def forward_cpu(self, x):
19 y = x[0].copy()
20 y[x[0] < 0] *= self.slope
21 return y,
22
23 def forward_gpu(self, x):
24 y = cuda.empty_like(x[0])
25 _kern()(y, x[0], x[0], self.slope)
26 return y,
27
28 def backward_cpu(self, x, gy):
29 gx = gy[0].copy()
30 gx[x[0] < 0] *= self.slope
31 return gx,
32
33 def backward_gpu(self, x, gy):
34 gx = cuda.empty_like(x[0])
35 _kern()(gx, x[0], gy[0], self.slope)
36 return gx,
37
38
39 def leaky_relu(x, slope=0.2):
40 """Leaky Rectified Linear Unit function.
41
42 This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
43 is a configurable slope value.
44
45 Args:
46 x (~chainer.Variable): Input variable.
47 slope (float): Slope value :math:`a`.
48
49 Returns:
50 ~chainer.Variable: Output variable.
51
52 """
53 return LeakyReLU(slope)(x)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py
--- a/chainer/functions/leaky_relu.py
+++ b/chainer/functions/leaky_relu.py
@@ -1,5 +1,8 @@
+import numpy
+
from chainer import cuda
from chainer import function
+from chainer.utils import type_check
def _kern():
@@ -15,6 +18,13 @@
def __init__(self, slope=0.2):
self.slope = slope
+ def check_type_forward(self, in_types):
+ type_check.expect(in_types.size() == 1)
+ x_type, = in_types
+ type_check.expect(
+ x_type.dtype == numpy.float32,
+ )
+
def forward_cpu(self, x):
y = x[0].copy()
y[x[0] < 0] *= self.slope
|
{"golden_diff": "diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py\n--- a/chainer/functions/leaky_relu.py\n+++ b/chainer/functions/leaky_relu.py\n@@ -1,5 +1,8 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n+from chainer.utils import type_check\n \n \n def _kern():\n@@ -15,6 +18,13 @@\n def __init__(self, slope=0.2):\n self.slope = slope\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(in_types.size() == 1)\n+ x_type, = in_types\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ )\n+\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n", "issue": "Add type check to LeakyReLU function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(\n x_type.dtype == numpy.float32,\n )\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}]}
| 759 | 205 |
gh_patches_debug_1427
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-340
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move py.test config to tox.ini
Pytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 from setuptools import setup, find_packages
3 from setuptools.command.test import test as TestCommand
4 import os
5 import sys
6
7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')
8
9
10 class PyTest(TestCommand):
11 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
12
13 def initialize_options(self):
14 TestCommand.initialize_options(self)
15 self.pytest_args = []
16
17 def finalize_options(self):
18 TestCommand.finalize_options(self)
19 self.test_args = []
20 self.test_suite = True
21
22 def run_tests(self):
23 #import here, cause outside the eggs aren't loaded
24 import pytest
25 errno = pytest.main(self.pytest_args)
26 sys.exit(errno)
27
28
29 setup(
30 name='saleor',
31 author='Mirumee Software',
32 author_email='[email protected]',
33 description="A fork'n'play e-commerce in Django",
34 license='BSD',
35 version='0.1.0a0',
36 url='http://getsaleor.com/',
37 packages=find_packages(),
38 include_package_data=True,
39 install_requires=[
40 'Babel>=1.3,<1.4a0',
41 'BabelDjango>=0.2,<0.3a0',
42 'Django>=1.8',
43 'dj_database_url>=0.3.0',
44 'django-emailit>=0.2.2',
45 'django-materializecss-form==0.0.64',
46 'django-model-utils>=2.0.0,<2.1a0',
47 'django-mptt>=0.7.1',
48 'django-offsite-storage>=0.0.5',
49 'django-payments>=0.7.0,<0.8a0',
50 'django-prices>=0.4.0,<0.5a0',
51 'djangorestframework>=3.1,<3.2a0',
52 'django-selectable==0.8.0',
53 'django-versatileimagefield>=1.0.1,<1.1a0',
54 'fake-factory>=0.3.2',
55 'google-measurement-protocol>=0.1.2,<0.2a0',
56 'jsonfield>=1.0.3',
57 'Markdown>=2.4',
58 'prices>=0.5,<0.6a0',
59 'requests>=1.2.0',
60 'satchless>=1.1.2,<1.2a0',
61 'unidecode'
62 ],
63 extras_require={
64 'PaaS': [
65 'whitenoise==1.0.6',
66 'gunicorn==19.2.1',
67 'psycopg2==2.6']},
68 cmdclass={
69 'test': PyTest},
70 entry_points={
71 'console_scripts': ['saleor = saleor:manage']},
72 tests_require=[
73 'mock==1.0.1',
74 'purl>=0.4.1',
75 'pytest',
76 'pytest-django'])
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@
entry_points={
'console_scripts': ['saleor = saleor:manage']},
tests_require=[
- 'mock==1.0.1',
+ 'mock==1.3.0',
'purl>=0.4.1',
'pytest',
'pytest-django'])
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,7 +70,7 @@\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n- 'mock==1.0.1',\n+ 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "issue": "Move py.test config to tox.ini\nPytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.\n\n", "before_files": [{"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.0.1',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}]}
| 1,104 | 97 |
gh_patches_debug_837
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-1088
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] CI for examples/visualization
It'll be better if we can validate that all the visualization examples run without failures with CI.
I guess https://github.com/computationalmodelling/nbval/ might be useful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3
4 import pkg_resources
5 from setuptools import find_packages
6 from setuptools import setup
7
8 from typing import Dict
9 from typing import List
10 from typing import Optional
11
12
13 def get_version() -> str:
14
15 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
16 with open(version_filepath) as f:
17 for line in f:
18 if line.startswith("__version__"):
19 return line.strip().split()[-1][1:-1]
20 assert False
21
22
23 def get_long_description() -> str:
24
25 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
26 with open(readme_filepath) as f:
27 return f.read()
28
29
30 def get_install_requires() -> List[str]:
31
32 return [
33 "alembic",
34 "cliff",
35 "cmaes>=0.3.2",
36 "colorlog",
37 "joblib",
38 "numpy",
39 "scipy!=1.4.0",
40 "sqlalchemy>=1.1.0",
41 "tqdm",
42 ]
43
44
45 def get_tests_require() -> List[str]:
46
47 return get_extras_require()["testing"]
48
49
50 def get_extras_require() -> Dict[str, List[str]]:
51
52 requirements = {
53 "checking": ["black", "hacking", "mypy",],
54 "codecov": ["codecov", "pytest-cov",],
55 "doctest": ["cma", "pandas", "plotly>=4.0.0", "scikit-learn>=0.19.0", "scikit-optimize",],
56 "document": ["sphinx", "sphinx_rtd_theme",],
57 "example": [
58 "catboost",
59 "chainer",
60 "lightgbm",
61 "mlflow",
62 "mpi4py",
63 "mxnet",
64 "pytorch-ignite",
65 "scikit-image",
66 "scikit-learn",
67 "torch",
68 "torchvision>=0.5.0",
69 "xgboost",
70 ]
71 + (["allennlp", "fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
72 + (
73 [
74 "dask[dataframe]",
75 "dask-ml",
76 "keras",
77 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
78 # https://github.com/optuna/optuna/issues/997.
79 "pytorch-lightning<0.7.0",
80 "tensorflow>=2.0.0",
81 "tensorflow-datasets",
82 ]
83 if sys.version_info[:2] < (3, 8)
84 else []
85 ),
86 "testing": [
87 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
88 # https://github.com/optuna/optuna/issues/1000.
89 "bokeh<2.0.0",
90 "chainer>=5.0.0",
91 "cma",
92 "fanova",
93 "lightgbm",
94 "mock",
95 "mpi4py",
96 "mxnet",
97 "pandas",
98 "plotly>=4.0.0",
99 "pytest",
100 "pytorch-ignite",
101 "scikit-learn>=0.19.0",
102 "scikit-optimize",
103 "torch",
104 "torchvision>=0.5.0",
105 "xgboost",
106 ]
107 + (["fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
108 + (
109 [
110 "keras",
111 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
112 # https://github.com/optuna/optuna/issues/997.
113 "pytorch-lightning<0.7.0",
114 "tensorflow",
115 "tensorflow-datasets",
116 ]
117 if sys.version_info[:2] < (3, 8)
118 else []
119 ),
120 }
121
122 return requirements
123
124
125 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
126
127 for pkg in pkgs:
128 try:
129 return pkg_resources.get_distribution(pkg)
130 except pkg_resources.DistributionNotFound:
131 pass
132 return None
133
134
135 pfnopt_pkg = find_any_distribution(["pfnopt"])
136 if pfnopt_pkg is not None:
137 msg = (
138 "We detected that PFNOpt is installed in your environment.\n"
139 "PFNOpt has been renamed Optuna. Please uninstall the old\n"
140 "PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`)."
141 )
142 print(msg)
143 exit(1)
144
145 setup(
146 name="optuna",
147 version=get_version(),
148 description="A hyperparameter optimization framework",
149 long_description=get_long_description(),
150 long_description_content_type="text/markdown",
151 author="Takuya Akiba",
152 author_email="[email protected]",
153 url="https://optuna.org/",
154 packages=find_packages(),
155 package_data={
156 "optuna": [
157 "storages/rdb/alembic.ini",
158 "storages/rdb/alembic/*.*",
159 "storages/rdb/alembic/versions/*.*",
160 ]
161 },
162 install_requires=get_install_requires(),
163 tests_require=get_tests_require(),
164 extras_require=get_extras_require(),
165 entry_points={"console_scripts": ["optuna = optuna.cli:main"]},
166 )
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,7 @@
"mlflow",
"mpi4py",
"mxnet",
+ "nbval",
"pytorch-ignite",
"scikit-image",
"scikit-learn",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n+ \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n", "issue": "[RFC] CI for examples/visualization\nIt'll be better if we can validate that all the visualization examples run without failures with CI.\r\n\r\nI guess https://github.com/computationalmodelling/nbval/ might be useful.\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.3.2\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We 
detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 
\"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py"}]}
| 1,901 | 74 |
gh_patches_debug_2789
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18204
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
meshgrid
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/creation.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
75
76 @to_ivy_arrays_and_back
77 def empty_like(x, dtype=None, name=None):
78 return ivy.empty_like(x, dtype=dtype)
79
80
81 @with_unsupported_dtypes(
82 {
83 "2.5.0 and below": (
84 "uint8",
85 "int8",
86 "int16",
87 "float16",
88 "complex64",
89 "complex128",
90 "bool",
91 )
92 },
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def tril(x, diagonal=0, name=None):
97 return ivy.tril(x, k=diagonal)
98
99
100 @with_unsupported_dtypes(
101 {
102 "2.5.0 and below": (
103 "uint8",
104 "int8",
105 "int16",
106 "float16",
107 "complex64",
108 "complex128",
109 "bool",
110 )
111 },
112 "paddle",
113 )
114 @to_ivy_arrays_and_back
115 def triu(x, diagonal=0, name=None):
116 return ivy.triu(x, k=diagonal)
117
118
119 @with_supported_dtypes(
120 {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
121 )
122 @to_ivy_arrays_and_back
123 def diagflat(x, offset=0, name=None):
124 arr = ivy.diagflat(x, offset=offset)
125 return arr
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -123,3 +123,11 @@
def diagflat(x, offset=0, name=None):
arr = ivy.diagflat(x, offset=offset)
return arr
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def meshgrid(*args, **kwargs):
+ return ivy.meshgrid(*args, indexing="ij")
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -123,3 +123,11 @@\n def diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n+\r\n+\r\n+@with_supported_dtypes(\r\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n+)\r\n+@to_ivy_arrays_and_back\r\n+def meshgrid(*args, **kwargs):\r\n+ return ivy.meshgrid(*args, indexing=\"ij\")\n", "issue": "meshgrid\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return 
ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}], "after_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, 
name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef meshgrid(*args, **kwargs):\r\n return ivy.meshgrid(*args, indexing=\"ij\")\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]}
| 1,548 | 190 |
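A minimal sketch of the call that the new `meshgrid` frontend wrapper in the diff above delegates to. It only assumes that `ivy` is installed with its NumPy backend available; the sample arrays and the printed shapes are illustrative and not taken from the dataset record.

```python
# Sketch only: the paddle-frontend meshgrid added in the golden diff is a
# thin wrapper around this ivy call with "ij" indexing.
import ivy

ivy.set_backend("numpy")  # assumes the NumPy backend is installed

x = ivy.array([0, 1, 2])
y = ivy.array([0, 1])

grid_x, grid_y = ivy.meshgrid(x, y, indexing="ij")
print(grid_x.shape, grid_y.shape)  # (3, 2) (3, 2)
```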
gh_patches_debug_2003
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-9604
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release google-cloud-storage
Hi @tseaver, could you help cut a release for google-cloud-storage?
cc: @JesseLovelace
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `storage/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-storage"
24 description = "Google Cloud Storage API client library"
25 version = "1.21.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-auth >= 1.2.0",
33 "google-cloud-core >= 1.0.3, < 2.0dev",
34 "google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev",
35 ]
36 extras = {}
37
38
39 # Setup boilerplate below this line.
40
41 package_root = os.path.abspath(os.path.dirname(__file__))
42
43 readme_filename = os.path.join(package_root, "README.rst")
44 with io.open(readme_filename, encoding="utf-8") as readme_file:
45 readme = readme_file.read()
46
47 # Only include packages under the 'google' namespace. Do not include tests,
48 # benchmarks, etc.
49 packages = [
50 package for package in setuptools.find_packages() if package.startswith("google")
51 ]
52
53 # Determine which namespaces are needed.
54 namespaces = ["google"]
55 if "google.cloud" in packages:
56 namespaces.append("google.cloud")
57
58
59 setuptools.setup(
60 name=name,
61 version=version,
62 description=description,
63 long_description=readme,
64 author="Google LLC",
65 author_email="[email protected]",
66 license="Apache 2.0",
67 url="https://github.com/GoogleCloudPlatform/google-cloud-python",
68 classifiers=[
69 release_status,
70 "Intended Audience :: Developers",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Operating System :: OS Independent",
80 "Topic :: Internet",
81 ],
82 platforms="Posix; MacOS X; Windows",
83 packages=packages,
84 namespace_packages=namespaces,
85 install_requires=dependencies,
86 extras_require=extras,
87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
88 include_package_data=True,
89 zip_safe=False,
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/storage/setup.py b/storage/setup.py
--- a/storage/setup.py
+++ b/storage/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-storage"
description = "Google Cloud Storage API client library"
-version = "1.21.0"
+version = "1.22.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
|
{"golden_diff": "diff --git a/storage/setup.py b/storage/setup.py\n--- a/storage/setup.py\n+++ b/storage/setup.py\n@@ -22,7 +22,7 @@\n \n name = \"google-cloud-storage\"\n description = \"Google Cloud Storage API client library\"\n-version = \"1.21.0\"\n+version = \"1.22.0\"\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Release google-cloud-storage \nHi @tseaver, could you help cut a release for google-cloud-storage?\r\n\r\ncc: @JesseLovelace \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.21.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.22.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}]}
| 1,149 | 101 |
gh_patches_debug_318
|
rasdani/github-patches
|
git_diff
|
TencentBlueKing__bk-user-164
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Department query API with ?lookup_field=name returns 404 when the department name contains "."
**Describe the problem you ran into**
Please describe the problem concisely; the clearer the description, the faster it can be resolved.
**Steps to reproduce**
1. Create a catalog whose name contains a dot, e.g. 【广东省.深圳市】
2. Query it through the API: http:://{host:port}/api/v2/departments/广东省.深圳市/?lookup_field=name
The query returns a 404.
Please describe how to reproduce the problem; if that is hard to put into words, screenshots or a short video can help.
**Expected behavior**
The expected, normal behavior
**Version**
- Provide the exact version of the user management service
- Is this an Enterprise Edition issue?
**If this is a SaaS page issue, please provide the operating system and browser you used**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Additional information**
Anything you think would help resolve the problem
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/api/bkuser_core/departments/urls.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 from bkuser_core.common.constants import LOOKUP_FIELD_NAME
12 from django.conf.urls import url
13
14 from . import views
15
16 PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
17
18 urlpatterns = [
19 url(
20 r"^api/v2/departments/$",
21 views.DepartmentViewSet.as_view(
22 {
23 "get": "list",
24 "post": "create",
25 }
26 ),
27 name="departments",
28 ),
29 url(
30 r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID,
31 views.DepartmentViewSet.as_view(
32 {
33 "get": "retrieve",
34 "post": "update",
35 "delete": "destroy",
36 "patch": "partial_update",
37 }
38 ),
39 name="departments.action",
40 ),
41 url(
42 r"^api/v2/departments/%s/restoration/$" % PVAR_DEPARTMENT_ID,
43 views.DepartmentViewSet.as_view(
44 {
45 "post": "restoration",
46 }
47 ),
48 name="departments.restoration",
49 ),
50 url(
51 r"^api/v2/departments/%s/ancestors/$" % PVAR_DEPARTMENT_ID,
52 views.DepartmentViewSet.as_view(
53 {
54 "get": "get_ancestor",
55 }
56 ),
57 name="departments.ancestors",
58 ),
59 url(
60 r"^api/v2/departments/%s/children/$" % PVAR_DEPARTMENT_ID,
61 views.DepartmentViewSet.as_view(
62 {
63 "get": "get_children",
64 }
65 ),
66 name="departments.children",
67 ),
68 url(
69 r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID,
70 views.DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}),
71 name="departments.profiles",
72 ),
73 #########
74 # Batch #
75 #########
76 url(
77 r"^api/v2/batch/departments/profiles/$",
78 views.BatchDepartmentsViewSet.as_view(
79 {
80 "get": "multiple_retrieve_profiles",
81 }
82 ),
83 name="department.batch",
84 ),
85 ########
86 # Edge #
87 ########
88 url(
89 r"^api/v2/edges/department_profile/$",
90 views.DepartmentProfileEdgeViewSet.as_view({"get": "list"}),
91 name="edge.department_profile",
92 ),
93 #############
94 # shortcuts #
95 #############
96 url(
97 r"^api/v2/shortcuts/departments/tops/$",
98 views.DepartmentViewSet.as_view({"get": "list_tops"}),
99 name="shortcuts.departments.list.tops",
100 ),
101 ]
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py
--- a/src/api/bkuser_core/departments/urls.py
+++ b/src/api/bkuser_core/departments/urls.py
@@ -13,7 +13,7 @@
from . import views
-PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
+PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-\.]+)" % LOOKUP_FIELD_NAME
urlpatterns = [
url(
|
{"golden_diff": "diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py\n--- a/src/api/bkuser_core/departments/urls.py\n+++ b/src/api/bkuser_core/departments/urls.py\n@@ -13,7 +13,7 @@\n \n from . import views\n \n-PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n+PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n \n urlpatterns = [\n url(\n", "issue": "\u90e8\u95e8\u67e5\u8be2\u63a5\u53e3 ?lookup_field=name\uff0c\u5f53\u90e8\u95e8\u540d\u79f0\u4e2d\u542b\u6709 \".\" \u65f6\u8fd4\u56de 404\n**\u7528\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898**\r\n\r\n\u8bf7\u7528\u7b80\u7ec3\u7684\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898\uff0c\u95ee\u9898\u63cf\u8ff0\u7684\u6e05\u6670\u7a0b\u5ea6\u51b3\u5b9a\u4e86\u95ee\u9898\u88ab\u89e3\u51b3\u7684\u6548\u7387\u3002\r\n\r\n**\u91cd\u73b0\u65b9\u6cd5**\r\n1. \u521b\u5efa\u4e00\u4e2a\u76ee\u5f55\uff0c\u540d\u5b57\u5305\u542b\u70b9\uff0c\u5982\u3010\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02\u3011\r\n2. \u4f7f\u7528api\u67e5\u8be2\uff0c http:://{host:port}/api/v2/departments/\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02/?lookup_field=name\r\n\r\n\u67e5\u8be2\u7ed3\u679c\u662f404\r\n\r\n\u8bf7\u63cf\u8ff0\u95ee\u9898\u91cd\u73b0\u7684\u65b9\u6cd5\uff0c\u5982\u679c\u4e0d\u65b9\u4fbf\u63cf\u8ff0\uff0c\u53ef\u4ee5\u901a\u8fc7\u622a\u56fe\u6216\u8005\u89c6\u9891\u8f85\u52a9\u3002\r\n\r\n**\u9884\u671f\u884c\u4e3a**\r\n\r\n\u9884\u671f\u7684\u6b63\u5e38\u884c\u4e3a\r\n\r\n**\u7248\u672c**\r\n- \u63d0\u4f9b\u7528\u6237\u7ba1\u7406\u7684\u5177\u4f53\u7248\u672c\u53f7\r\n- \u662f\u5426\u662f\u4f01\u4e1a\u7248\u95ee\u9898\uff1f\r\n\r\n**\u5982\u679c\u662f SaaS \u9875\u9762\u95ee\u9898\uff0c\u8bf7\u63d0\u4f9b\u4f7f\u7528\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u6d4f\u89c8\u5668\u4fe1\u606f**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**\u989d\u5916\u4fe1\u606f**\r\n\r\n\u4efb\u4f55\u4f60\u89c9\u5f97\u6709\u52a9\u4e8e\u95ee\u9898\u89e3\u51b3\u7684\u5185\u5bb9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}]}
| 1,406 | 136 |
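To make the effect of this one-character pattern change concrete, here is a small standalone regex check. The value of `LOOKUP_FIELD_NAME` and the URL prefix are assumptions for illustration only and are not taken from the project's constants module.

```python
# Standalone sketch: compares the old and patched department URL patterns
# against a department name that contains a dot.
import re

LOOKUP_FIELD_NAME = "lookup_field"  # assumed value, for illustration

old_pattern = r"^api/v2/departments/(?P<%s>[\w\-]+)/$" % LOOKUP_FIELD_NAME
new_pattern = r"^api/v2/departments/(?P<%s>[\w\-\.]+)/$" % LOOKUP_FIELD_NAME

path = "api/v2/departments/广东省.深圳市/"

print(bool(re.match(old_pattern, path)))  # False -> Django falls through to 404
print(bool(re.match(new_pattern, path)))  # True  -> the view receives the name
```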
gh_patches_debug_2253
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-1082
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GofmtBear: Add advanced asciinema
The coala bear GofmtBear does not have a proper asciinema.
`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.
I'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/go/GofmtBear.py`
Content:
```
1 from coalib.bearlib.abstractions.Linter import linter
2 from coalib.bears.requirements.GoRequirement import GoRequirement
3
4
5 @linter(executable='gofmt',
6 use_stdin=True,
7 output_format='corrected',
8 result_message='Formatting can be improved.')
9 class GofmtBear:
10 """
11 Suggest better formatting options in Go code. Basic checks like alignment,
12 indentation, and redundant parentheses are provided.
13
14 This is done using the ``gofmt`` utility. For more information visit
15 <https://golang.org/cmd/gofmt/>.
16 """
17 LANGUAGES = {'Go'}
18 REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}
19 AUTHORS = {'The coala developers'}
20 AUTHORS_EMAILS = {'[email protected]'}
21 LICENSE = 'AGPL-3.0'
22 CAN_FIX = {'Formatting'}
23
24 @staticmethod
25 def create_arguments(filename, file, config_file):
26 return ()
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py
--- a/bears/go/GofmtBear.py
+++ b/bears/go/GofmtBear.py
@@ -20,6 +20,7 @@
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'
@staticmethod
def create_arguments(filename, file, config_file):
|
{"golden_diff": "diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py\n--- a/bears/go/GofmtBear.py\n+++ b/bears/go/GofmtBear.py\n@@ -20,6 +20,7 @@\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n \n @staticmethod\n def create_arguments(filename, file, config_file):\n", "issue": "GofmtBear: Add advanced asciinema\nThe coala bear GofmtBear does not have a proper asciinema.\r\n\r\n`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.\r\n\r\nI'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.\r\n\n", "before_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}], "after_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}]}
| 623 | 128 |
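Since the requested asciinema is meant to show `gofmt` rewriting badly formatted Go, a quick way to preview that before/after is to pipe a snippet through the tool. The sketch below assumes the `gofmt` binary is on `PATH`; the Go source string is made up for the demo and is not part of the dataset record.

```python
# Sketch: feed an intentionally messy Go snippet to gofmt and print the
# canonical formatting it produces (tab indentation, normalized spacing,
# redundant trailing semicolons removed).
import subprocess

messy_go = (
    "package main\n"
    "import \"fmt\"\n"
    "func main() {\n"
    "  x:=1\n"
    "\t\ty   :=   2\n"
    "    fmt.Println( x+y );\n"
    "}\n"
)

result = subprocess.run(
    ["gofmt"],
    input=messy_go,
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout)
```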
gh_patches_debug_9694
|
rasdani/github-patches
|
git_diff
|
pretix__pretix-883
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rich_text: tel schema
When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/base/templatetags/rich_text.py`
Content:
```
1 import urllib.parse
2
3 import bleach
4 import markdown
5 from bleach import DEFAULT_CALLBACKS
6 from django import template
7 from django.conf import settings
8 from django.core import signing
9 from django.urls import reverse
10 from django.utils.http import is_safe_url
11 from django.utils.safestring import mark_safe
12
13 register = template.Library()
14
15 ALLOWED_TAGS = [
16 'a',
17 'abbr',
18 'acronym',
19 'b',
20 'blockquote',
21 'br',
22 'code',
23 'em',
24 'i',
25 'li',
26 'ol',
27 'strong',
28 'ul',
29 'p',
30 'table',
31 'tbody',
32 'thead',
33 'tr',
34 'td',
35 'th',
36 'div',
37 'span',
38 'hr',
39 'h1',
40 'h2',
41 'h3',
42 'h4',
43 'h5',
44 'h6',
45 'pre',
46 # Update doc/user/markdown.rst if you change this!
47 ]
48
49 ALLOWED_ATTRIBUTES = {
50 'a': ['href', 'title'],
51 'abbr': ['title'],
52 'acronym': ['title'],
53 'table': ['width'],
54 'td': ['width', 'align'],
55 'div': ['class'],
56 'p': ['class'],
57 'span': ['class'],
58 # Update doc/user/markdown.rst if you change this!
59 }
60
61 ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']
62
63
64 def safelink_callback(attrs, new=False):
65 url = attrs.get((None, 'href'), '/')
66 if not is_safe_url(url) and not url.startswith('mailto:'):
67 signer = signing.Signer(salt='safe-redirect')
68 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
69 attrs[None, 'target'] = '_blank'
70 attrs[None, 'rel'] = 'noopener'
71 return attrs
72
73
74 def abslink_callback(attrs, new=False):
75 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))
76 attrs[None, 'target'] = '_blank'
77 attrs[None, 'rel'] = 'noopener'
78 return attrs
79
80
81 def markdown_compile(source):
82 return bleach.clean(
83 markdown.markdown(
84 source,
85 extensions=[
86 'markdown.extensions.sane_lists',
87 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails
88 ]
89 ),
90 tags=ALLOWED_TAGS,
91 attributes=ALLOWED_ATTRIBUTES,
92 protocols=ALLOWED_PROTOCOLS,
93 )
94
95
96 @register.filter
97 def rich_text(text: str, **kwargs):
98 """
99 Processes markdown and cleans HTML in a text input.
100 """
101 text = str(text)
102 body_md = bleach.linkify(
103 markdown_compile(text),
104 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])
105 )
106 return mark_safe(body_md)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py
--- a/src/pretix/base/templatetags/rich_text.py
+++ b/src/pretix/base/templatetags/rich_text.py
@@ -63,7 +63,7 @@
def safelink_callback(attrs, new=False):
url = attrs.get((None, 'href'), '/')
- if not is_safe_url(url) and not url.startswith('mailto:'):
+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):
signer = signing.Signer(salt='safe-redirect')
attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
attrs[None, 'target'] = '_blank'
|
{"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -63,7 +63,7 @@\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n- if not is_safe_url(url) and not url.startswith('mailto:'):\n+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\nrich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. 
with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}], "after_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]}
| 1,417 | 195 |
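For context on what the patched callback changes, the following standalone sketch runs a `tel:` markdown link through a trimmed-down version of the same markdown-plus-bleach pipeline. It does not import pretix; the allowed tags/attributes and the helper name are illustrative, and it assumes the `markdown` and `bleach` packages are installed.

```python
# Sketch of the pipeline the patch touches, without importing pretix.
import bleach
import markdown

ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]

def keep_direct_href(url: str) -> bool:
    # Mirrors the patched condition: mailto: and tel: links keep their
    # direct href instead of being routed through the redirect view.
    return url.startswith("mailto:") or url.startswith("tel:")

source = "[Call us!](tel:+1-202-555-0102)"
html = bleach.clean(
    markdown.markdown(source),
    tags=["a", "p"],
    attributes={"a": ["href", "title"]},
    protocols=ALLOWED_PROTOCOLS,
)
print(html)  # expected (roughly): <p><a href="tel:+1-202-555-0102">Call us!</a></p>
print(keep_direct_href("tel:+1-202-555-0102"))  # True
```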
gh_patches_debug_37392
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-1670
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stray WPS513 warning.
# Bug report
`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.
## What's wrong
The following snippet:
```python
if private_key is not None:
if isinstance(private_key, PKey):
key = private_key
else:
if isinstance(private_key, str): # WPS513 Found implicit `elif` condition
stream = io.StringIO()
stream.write(private_key)
stream.seek(0)
private_key = stream
key = RSAKey.from_private_key(private_key)
```
triggers a stray WPS513 warning. Seems like the assignment to `key` is not taken into account.
## How is that should be
`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/tokenize/conditions.py`
Content:
```
1 import tokenize
2 from typing import ClassVar, FrozenSet
3
4 from typing_extensions import final
5
6 from wemake_python_styleguide.violations.refactoring import (
7 ImplicitElifViolation,
8 )
9 from wemake_python_styleguide.visitors.base import BaseTokenVisitor
10
11
12 @final
13 class IfElseVisitor(BaseTokenVisitor):
14 """
15 Checks if tokens tokens.
16
17 We use ``tokenize`` instead of ``ast`` because
18
19 .. code:: python
20
21 if some:
22 ...
23 else:
24 if other:
25 ...
26
27 has the same ``ast`` representation as:
28
29 .. code:: python
30
31 if some:
32 ...
33 elif other:
34 ...
35
36 That's why we have to use ``tokenize`` to find
37 the raw tokens inside the text.
38
39 """
40
41 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((
42 tokenize.NEWLINE,
43 tokenize.NL,
44 tokenize.COLON,
45 tokenize.INDENT,
46 ))
47
48 def visit_name(self, token: tokenize.TokenInfo) -> None:
49 """
50 Checks that ``if`` nodes are defined correctly.
51
52 Raises:
53 ImplicitElifViolation
54
55 """
56 self._check_implicit_elif(token)
57
58 def _does_else_belong_to_if(self, start_index: int) -> bool:
59 previous_token = self.file_tokens[start_index - 1]
60
61 if previous_token.type != tokenize.DEDENT:
62 # This is not the first token on the line, which means that it can
63 # also be "embedded" else: x if A else B
64 return False
65
66 for token in reversed(self.file_tokens[:start_index - 1]):
67 if token.type != tokenize.NAME:
68 continue
69
70 # Here we rely upon an intuition that in Python else have to be
71 # on the same level (same indentation) as parent statement.
72 if token.start[1] == previous_token.start[1]:
73 return token.string in {'if', 'elif'}
74
75 return False
76
77 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
78 if token.string != 'else':
79 return
80
81 index = self.file_tokens.index(token)
82
83 # `else` token can belong also to `for` and `try/except` statement,
84 # which can trigger false positive for that violation.
85 if not self._does_else_belong_to_if(index):
86 return
87
88 # There's a bug in coverage, I am not sure how to make it work.
89 for next_token in self.file_tokens[index + 1:]: # pragma: no cover
90 if next_token.exact_type in self._allowed_token_types:
91 continue
92 elif next_token.string == 'if':
93 self.add_violation(ImplicitElifViolation(next_token))
94 return
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py
--- a/wemake_python_styleguide/visitors/tokenize/conditions.py
+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py
@@ -1,5 +1,5 @@
import tokenize
-from typing import ClassVar, FrozenSet
+from typing import ClassVar, FrozenSet, Sequence
from typing_extensions import final
@@ -74,21 +74,63 @@
return False
- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
- if token.string != 'else':
- return
+ def _if_has_code_below(
+ self,
+ remaining_tokens: Sequence[tokenize.TokenInfo],
+ ) -> bool:
+ """
+ Checks code immediately below an if statement to remove false positives.
+
+ Checks that, below an if that comes immediately after an else, there is
+ more code to be considered so as not to throw an incorrect violation.
+ """
+ index = 1
+
+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:
+ index += 1
+
+ context_count = 1
- index = self.file_tokens.index(token)
+ while context_count:
+ next_token = remaining_tokens[index]
+ if next_token.exact_type == tokenize.INDENT:
+ context_count += 1
+ if next_token.exact_type == tokenize.DEDENT:
+ context_count -= 1
+ index += 1
+
+ return remaining_tokens[index].exact_type != tokenize.DEDENT
+
+ def _check_complex_else(
+ self,
+ tokens: Sequence[tokenize.TokenInfo],
+ current_token: tokenize.TokenInfo,
+ index: int,
+ ) -> None:
+ complex_else = self._if_has_code_below(tokens[index + 1:])
+ if not complex_else:
+ self.add_violation(ImplicitElifViolation(current_token))
+
+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:
+ is_not_else = token.string != 'else'
# `else` token can belong also to `for` and `try/except` statement,
# which can trigger false positive for that violation.
- if not self._does_else_belong_to_if(index):
+ belongs_to_if = self._does_else_belong_to_if(index)
+
+ return is_not_else or not belongs_to_if
+
+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
+ token_index = self.file_tokens.index(token)
+
+ if self._is_invalid_token(token_index, token):
return
# There's a bug in coverage, I am not sure how to make it work.
- for next_token in self.file_tokens[index + 1:]: # pragma: no cover
+ next_tokens = self.file_tokens[token_index + 1:]
+ for index, next_token in enumerate(next_tokens): # pragma: no cover
if next_token.exact_type in self._allowed_token_types:
continue
elif next_token.string == 'if':
- self.add_violation(ImplicitElifViolation(next_token))
+ self._check_complex_else(next_tokens, next_token, index)
return
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -1,5 +1,5 @@\n import tokenize\n-from typing import ClassVar, FrozenSet\n+from typing import ClassVar, FrozenSet, Sequence\n \n from typing_extensions import final\n \n@@ -74,21 +74,63 @@\n \n return False\n \n- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n- if token.string != 'else':\n- return\n+ def _if_has_code_below(\n+ self,\n+ remaining_tokens: Sequence[tokenize.TokenInfo],\n+ ) -> bool:\n+ \"\"\"\n+ Checks code immediately below an if statement to remove false positives.\n+\n+ Checks that, below an if that comes immediately after an else, there is\n+ more code to be considered so as not to throw an incorrect violation.\n+ \"\"\"\n+ index = 1\n+\n+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n+ index += 1\n+\n+ context_count = 1\n \n- index = self.file_tokens.index(token)\n+ while context_count:\n+ next_token = remaining_tokens[index]\n+ if next_token.exact_type == tokenize.INDENT:\n+ context_count += 1\n+ if next_token.exact_type == tokenize.DEDENT:\n+ context_count -= 1\n+ index += 1\n+\n+ return remaining_tokens[index].exact_type != tokenize.DEDENT\n+\n+ def _check_complex_else(\n+ self,\n+ tokens: Sequence[tokenize.TokenInfo],\n+ current_token: tokenize.TokenInfo,\n+ index: int,\n+ ) -> None:\n+ complex_else = self._if_has_code_below(tokens[index + 1:])\n+ if not complex_else:\n+ self.add_violation(ImplicitElifViolation(current_token))\n+\n+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n+ is_not_else = token.string != 'else'\n \n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n- if not self._does_else_belong_to_if(index):\n+ belongs_to_if = self._does_else_belong_to_if(index)\n+\n+ return is_not_else or not belongs_to_if\n+\n+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n+ token_index = self.file_tokens.index(token)\n+\n+ if self._is_invalid_token(token_index, token):\n return\n \n # There's a bug in coverage, I am not sure how to make it work.\n- for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n+ next_tokens = self.file_tokens[token_index + 1:]\n+ for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n- self.add_violation(ImplicitElifViolation(next_token))\n+ self._check_complex_else(next_tokens, next_token, index)\n return\n", "issue": "Stray WPS513 warning.\n# Bug report\r\n\r\n`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.\r\n\r\n## What's wrong\r\n\r\nThe following snippet:\r\n\r\n```python\r\nif private_key is not None:\r\n if isinstance(private_key, PKey):\r\n key = private_key\r\n else:\r\n if isinstance(private_key, str): # WPS513 Found implicit `elif` condition\r\n stream = io.StringIO()\r\n stream.write(private_key)\r\n stream.seek(0)\r\n private_key = stream\r\n key = RSAKey.from_private_key(private_key)\r\n```\r\n\r\ntriggers a stray WPS513 warning. 
Seems like the assignment to `key` is not taken into account.\r\n\r\n## How is that should be\r\n\r\n`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.\n", "before_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n if not self._does_else_belong_to_if(index):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}], "after_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet, Sequence\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. 
code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _if_has_code_below(\n self,\n remaining_tokens: Sequence[tokenize.TokenInfo],\n ) -> bool:\n \"\"\"\n Checks code immediately below an if statement to remove false positives.\n\n Checks that, below an if that comes immediately after an else, there is\n more code to be considered so as not to throw an incorrect violation.\n \"\"\"\n index = 1\n\n while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n index += 1\n\n context_count = 1\n\n while context_count:\n next_token = remaining_tokens[index]\n if next_token.exact_type == tokenize.INDENT:\n context_count += 1\n if next_token.exact_type == tokenize.DEDENT:\n context_count -= 1\n index += 1\n\n return remaining_tokens[index].exact_type != tokenize.DEDENT\n\n def _check_complex_else(\n self,\n tokens: Sequence[tokenize.TokenInfo],\n current_token: tokenize.TokenInfo,\n index: int,\n ) -> None:\n complex_else = self._if_has_code_below(tokens[index + 1:])\n if not complex_else:\n self.add_violation(ImplicitElifViolation(current_token))\n\n def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n is_not_else = token.string != 'else'\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n belongs_to_if = self._does_else_belong_to_if(index)\n\n return is_not_else or not belongs_to_if\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n token_index = self.file_tokens.index(token)\n\n if self._is_invalid_token(token_index, token):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n next_tokens = self.file_tokens[token_index + 1:]\n for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self._check_complex_else(next_tokens, next_token, index)\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]}
| 1,268 | 757 |
gh_patches_debug_20297
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1063
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Snippets ICal AttributeError exception
Snippets ICal feed raises:
`AttributeError: 'ASRSnippet' object has no attribute 'locales'`
Fix the error and add tests to catch this on the unit test level. We did catch this on the acceptance tests level, though :sweat_smile: 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/urls.py`
Content:
```
1 from django.urls import path
2
3 from watchman import views as watchman_views
4
5 from snippets.base import views
6 from snippets.base import feed
7
8
9 urlpatterns = [
10 path('', views.HomeView.as_view()),
11 path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'
12 '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',
13 views.fetch_snippets, name='base.fetch_snippets'),
14 path('preview/', views.preview_snippet, name='base.preview'),
15 path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),
16 path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),
17 path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),
18 path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
19 path('healthz/', watchman_views.ping, name='watchman.ping'),
20 path('readiness/', watchman_views.status, name='watchman.status'),
21 path('feeds/snippets.ics', feed.SnippetsFeed()),
22 ]
23
```
Path: `snippets/base/feed.py`
Content:
```
1 import operator
2 from datetime import timedelta
3 from distutils.util import strtobool
4 from textwrap import dedent
5 from urllib.parse import urlparse
6
7 from django.conf import settings
8 from django.db.models import Q
9
10 import django_filters
11 from django_ical.views import ICalFeed
12
13 from snippets.base import models
14
15
16 class ASRSnippetFilter(django_filters.FilterSet):
17 name = django_filters.CharFilter(lookup_expr='icontains')
18 locale = django_filters.CharFilter(method='filter_locale')
19 only_scheduled = django_filters.ChoiceFilter(
20 method='filter_scheduled', choices=(('true', 'Yes'),
21 ('false', 'No'),
22 ('all', 'All')))
23
24 def filter_locale(self, queryset, name, value):
25 if not value:
26 return queryset
27
28 locales = value.split(',')
29 return queryset.filter(
30 operator.or_(
31 *[Q(locale__code=',{},'.format(locale)) for locale in locales]
32 )
33 )
34
35 def filter_scheduled(self, queryset, name, value):
36 if value == 'all':
37 return queryset
38
39 value = strtobool(value)
40
41 if value:
42 return queryset.exclude(publish_start=None, publish_end=None)
43
44 return queryset.filter(publish_start=None, publish_end=None)
45
46 class Meta:
47 model = models.ASRSnippet
48 fields = []
49
50
51 class SnippetsFeed(ICalFeed):
52 timezone = 'UTC'
53 title = 'Snippets'
54
55 def __call__(self, request, *args, **kwargs):
56 self.request = request
57 return super().__call__(request, *args, **kwargs)
58
59 @property
60 def product_id(self):
61 return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,
62 self.request.GET.urlencode())
63
64 def items(self):
65 queryset = (models.ASRSnippet.objects
66 .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])
67 .order_by('publish_start'))
68 filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)
69 return filtr.qs
70
71 def item_title(self, item):
72 return item.name
73
74 def item_link(self, item):
75 return item.get_admin_url()
76
77 def item_description(self, item):
78 description = dedent('''\
79 Channels: {}
80 Locales: {}'
81 Preview Link: {}
82 '''.format(', '.join(item.channels),
83 ', '.join(item.locales.values_list('name', flat=True)),
84 item.get_preview_url()))
85 return description
86
87 def item_start_datetime(self, item):
88 return item.publish_start or item.created
89
90 def item_end_datetime(self, item):
91 return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))
92
93 def item_created(self, item):
94 return item.created
95
96 def item_updateddate(self, item):
97 return item.modified
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/feed.py b/snippets/base/feed.py
--- a/snippets/base/feed.py
+++ b/snippets/base/feed.py
@@ -77,10 +77,10 @@
def item_description(self, item):
description = dedent('''\
Channels: {}
- Locales: {}'
+ Locale: {}'
Preview Link: {}
'''.format(', '.join(item.channels),
- ', '.join(item.locales.values_list('name', flat=True)),
+ item.locale,
item.get_preview_url()))
return description
diff --git a/snippets/base/urls.py b/snippets/base/urls.py
--- a/snippets/base/urls.py
+++ b/snippets/base/urls.py
@@ -18,5 +18,5 @@
path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
path('healthz/', watchman_views.ping, name='watchman.ping'),
path('readiness/', watchman_views.status, name='watchman.status'),
- path('feeds/snippets.ics', feed.SnippetsFeed()),
+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),
]
|
{"golden_diff": "diff --git a/snippets/base/feed.py b/snippets/base/feed.py\n--- a/snippets/base/feed.py\n+++ b/snippets/base/feed.py\n@@ -77,10 +77,10 @@\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n- Locales: {}'\n+ Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n- ', '.join(item.locales.values_list('name', flat=True)),\n+ item.locale,\n item.get_preview_url()))\n return description\n \ndiff --git a/snippets/base/urls.py b/snippets/base/urls.py\n--- a/snippets/base/urls.py\n+++ b/snippets/base/urls.py\n@@ -18,5 +18,5 @@\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n- path('feeds/snippets.ics', feed.SnippetsFeed()),\n+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n ]\n", "issue": "Fix Snippets ICal AttributeError exception \nSnippets ICal feed raises:\r\n\r\n`AttributeError: 'ASRSnippet' object has no attribute 'locales'`\r\n\r\nFix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile: \r\n\n", "before_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed()),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass 
SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locales: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n ', '.join(item.locales.values_list('name', flat=True)),\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}], "after_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n 
class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n item.locale,\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}]}
| 1,448 | 269 |
gh_patches_debug_29324
|
rasdani/github-patches
|
git_diff
|
dask__distributed-228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No such file or directory "bokeh" - in release 1.9.2 (conda install)
Get this error in my logs:
```
scheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786
scheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI
scheduler_1 | Traceback (most recent call last):
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 950, in __init__
scheduler_1 | restore_signals, start_new_session)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 1544, in _execute_child
scheduler_1 | raise child_exception_type(errno_num, err_msg)
scheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'
scheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler
scheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700
scheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/cli/dscheduler.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import logging
4 import os
5 import socket
6 import subprocess
7 from sys import argv, exit
8 from time import sleep
9
10 import click
11
12 import distributed
13 from distributed import Scheduler
14 from distributed.utils import get_ip
15 from distributed.http import HTTPScheduler
16 from distributed.cli.utils import check_python_3
17 from tornado.ioloop import IOLoop
18
19 logger = logging.getLogger('distributed.scheduler')
20
21 ip = get_ip()
22
23 import signal
24
25 bokeh_proc = [False]
26
27
28 def handle_signal(sig, frame):
29 if bokeh_proc[0]:
30 bokeh_proc[0].terminate()
31 IOLoop.instance().add_callback(IOLoop.instance().stop)
32
33 signal.signal(signal.SIGINT, handle_signal)
34 signal.signal(signal.SIGTERM, handle_signal)
35
36
37 @click.command()
38 @click.argument('center', type=str, default='')
39 @click.option('--port', type=int, default=8786, help="Serving port")
40 @click.option('--http-port', type=int, default=9786, help="HTTP port")
41 @click.option('--bokeh-port', type=int, default=8787, help="HTTP port")
42 @click.option('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,
43 required=False, help="Launch Bokeh Web UI")
44 @click.option('--host', type=str, default=ip,
45 help="Serving host defaults to %s" % ip)
46 @click.option('--show/--no-show', default=False, help="Show web UI")
47 def main(center, host, port, http_port, bokeh_port, show, _bokeh):
48 ip = socket.gethostbyname(host)
49 loop = IOLoop.current()
50 scheduler = Scheduler(center, ip=ip,
51 services={('http', http_port): HTTPScheduler})
52 if center:
53 loop.run_sync(scheduler.sync_center)
54 scheduler.start(port)
55
56 if _bokeh:
57 try:
58 import bokeh
59 import distributed.bokeh
60 hosts = ['%s:%d' % (h, bokeh_port) for h in
61 ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]
62 dirname = os.path.dirname(distributed.__file__)
63 paths = [os.path.join(dirname, 'bokeh', name)
64 for name in ['status', 'tasks']]
65 args = (['bokeh', 'serve'] + paths +
66 ['--log-level', 'warning',
67 '--check-unused-sessions=50',
68 '--unused-session-lifetime=1',
69 '--port', str(bokeh_port)] +
70 sum([['--host', host] for host in hosts], []))
71 if show:
72 args.append('--show')
73 bokeh_proc[0] = subprocess.Popen(args)
74
75 logger.info(" Start Bokeh UI at: http://%s:%d/status/"
76 % (ip, bokeh_port))
77 except ImportError:
78 logger.info("Please install Bokeh to get Web UI")
79 except Exception as e:
80 logger.warn("Could not start Bokeh web UI", exc_info=True)
81
82 loop.start()
83 loop.close()
84 scheduler.stop()
85 bokeh_proc[0].terminate()
86
87 logger.info("End scheduler at %s:%d", ip, port)
88
89
90 def go():
91 check_python_3()
92 main()
93
94
95 if __name__ == '__main__':
96 go()
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py
--- a/distributed/cli/dscheduler.py
+++ b/distributed/cli/dscheduler.py
@@ -1,9 +1,9 @@
from __future__ import print_function, division, absolute_import
import logging
+import multiprocessing
import os
import socket
-import subprocess
from sys import argv, exit
from time import sleep
@@ -22,12 +22,8 @@
import signal
-bokeh_proc = [False]
-
def handle_signal(sig, frame):
- if bokeh_proc[0]:
- bokeh_proc[0].terminate()
IOLoop.instance().add_callback(IOLoop.instance().stop)
signal.signal(signal.SIGINT, handle_signal)
@@ -70,7 +66,10 @@
sum([['--host', host] for host in hosts], []))
if show:
args.append('--show')
- bokeh_proc[0] = subprocess.Popen(args)
+ from bokeh.command.bootstrap import main
+ proc = multiprocessing.Process(target=main, args=(args,))
+ proc.daemon = True
+ proc.start()
logger.info(" Start Bokeh UI at: http://%s:%d/status/"
% (ip, bokeh_port))
@@ -82,7 +81,7 @@
loop.start()
loop.close()
scheduler.stop()
- bokeh_proc[0].terminate()
+ proc.terminate()
logger.info("End scheduler at %s:%d", ip, port)
|
{"golden_diff": "diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py\n--- a/distributed/cli/dscheduler.py\n+++ b/distributed/cli/dscheduler.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, division, absolute_import\n \n import logging\n+import multiprocessing\n import os\n import socket\n-import subprocess\n from sys import argv, exit\n from time import sleep\n \n@@ -22,12 +22,8 @@\n \n import signal\n \n-bokeh_proc = [False]\n-\n \n def handle_signal(sig, frame):\n- if bokeh_proc[0]:\n- bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n \n signal.signal(signal.SIGINT, handle_signal)\n@@ -70,7 +66,10 @@\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n- bokeh_proc[0] = subprocess.Popen(args)\n+ from bokeh.command.bootstrap import main\n+ proc = multiprocessing.Process(target=main, args=(args,))\n+ proc.daemon = True\n+ proc.start()\n \n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n@@ -82,7 +81,7 @@\n loop.start()\n loop.close()\n scheduler.stop()\n- bokeh_proc[0].terminate()\n+ proc.terminate()\n \n logger.info(\"End scheduler at %s:%d\", ip, port)\n", "issue": "No such file or directory \"bokeh\" - in release 1.9.2 (conda install)\nGet this error in my logs:\n\n```\nscheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786\nscheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI\nscheduler_1 | Traceback (most recent call last):\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py\", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 950, in __init__\nscheduler_1 | restore_signals, start_new_session)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 1544, in _execute_child\nscheduler_1 | raise child_exception_type(errno_num, err_msg)\nscheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'\nscheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler\nscheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700\nscheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport socket\nimport subprocess\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\nbokeh_proc = [False]\n\n\ndef handle_signal(sig, frame):\n if bokeh_proc[0]:\n bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, 
help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n bokeh_proc[0] = subprocess.Popen(args)\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n bokeh_proc[0].terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport multiprocessing\nimport os\nimport socket\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\n\ndef handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] 
+ paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n from bokeh.command.bootstrap import main\n proc = multiprocessing.Process(target=main, args=(args,))\n proc.daemon = True\n proc.start()\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n proc.terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}]}
| 1,523 | 337 |
gh_patches_debug_12036
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix TFExample codec
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `record_codec/__init__.py`
Content:
```
```
Path: `record_codec/tf_example_codec.py`
Content:
```
1 from enum import Enum
2 import tensorflow as tf
3
4
5 class TFExampleCodec(object):
6 def __init__(self, feature_columns):
7 self._f_name2type = {}
8 self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
9 for f_col in feature_columns:
10 self._f_name2type[f_col.key] = f_col.dtype
11
12 def encode(self, example):
13 f_dict = {}
14 for f_name, f_value in example:
15 f_type = self._f_name2type[f_name]
16 if f_type == tf.string:
17 f_dict[f_name] = tf.train.Feature(
18 bytes_list=tf.train.BytesList(value=f_value)
19 )
20 elif f_type == tf.float32:
21 f_dict[f_name] = tf.train.Feature(
22 float_list=tf.train.FloatList(value=f_value.flatten())
23 )
24 elif f_type == tf.int64:
25 f_dict[f_name] = tf.train.Feature(
26 int64_list=tf.train.Int64List(value=f_value.flatten())
27 )
28 else:
29 raise ValueError(
30 "not supported tensorflow data type: " + str(f_type)
31 )
32
33 example = tf.train.Example(features=tf.train.Features(feature=f_dict))
34 return example.SerializeToString()
35
36 def decode(self, raw):
37 return tf.parse_single_example(raw, self._example_spec)
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/record_codec/__init__.py b/record_codec/__init__.py
--- a/record_codec/__init__.py
+++ b/record_codec/__init__.py
@@ -0,0 +1,3 @@
+from record_codec.tf_example_codec import TFExampleCodec
+
+__all__=[TFExampleCodec]
diff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py
--- a/record_codec/tf_example_codec.py
+++ b/record_codec/tf_example_codec.py
@@ -4,10 +4,8 @@
class TFExampleCodec(object):
def __init__(self, feature_columns):
- self._f_name2type = {}
self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
- for f_col in feature_columns:
- self._f_name2type[f_col.key] = f_col.dtype
+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}
def encode(self, example):
f_dict = {}
|
{"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -0,0 +1,3 @@\n+from record_codec.tf_example_codec import TFExampleCodec\n+\n+__all__=[TFExampleCodec]\ndiff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py\n--- a/record_codec/tf_example_codec.py\n+++ b/record_codec/tf_example_codec.py\n@@ -4,10 +4,8 @@\n \n class TFExampleCodec(object):\n def __init__(self, feature_columns):\n- self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n- for f_col in feature_columns:\n- self._f_name2type[f_col.key] = f_col.dtype\n+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n \n def encode(self, example):\n f_dict = {}\n", "issue": "Fix TFExample codec\n\n", "before_files": [{"content": "", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n for f_col in feature_columns:\n self._f_name2type[f_col.key] = f_col.dtype\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}], "after_files": [{"content": "from record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}]}
| 634 | 233 |
gh_patches_debug_18359
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-6361
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`pants_requirement` should include the `; python_version<'3'` environment marker
Until pants can run on python 3, the `pants_requirement` should add an environment marker to restrict the python environment it applies to. After pants only supports `>=3.5`, a change should be made to the environment marker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/pants_requirement.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import object
9
10 from pants.backend.python.python_requirement import PythonRequirement
11 from pants.base.build_environment import pants_version
12
13
14 class PantsRequirement(object):
15 """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.
16
17 This requirement is useful for custom plugin authors who want to build and test their plugin with
18 pants itself. Using the resulting target as a dependency of their plugin target ensures the
19 dependency stays true to the surrounding repo's version of pants.
20
21 NB: The requirement generated is for official pants releases on pypi; so may not be appropriate
22 for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.
23
24 :API: public
25 """
26
27 def __init__(self, parse_context):
28 self._parse_context = parse_context
29
30 def __call__(self, name=None):
31 """
32 :param string name: The name to use for the target, defaults to the parent dir name.
33 """
34 name = name or os.path.basename(self._parse_context.rel_path)
35 requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
36 self._parse_context.create_object('python_requirement_library',
37 name=name,
38 requirements=[requirement])
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py
--- a/src/python/pants/backend/python/pants_requirement.py
+++ b/src/python/pants/backend/python/pants_requirement.py
@@ -32,7 +32,14 @@
:param string name: The name to use for the target, defaults to the parent dir name.
"""
name = name or os.path.basename(self._parse_context.rel_path)
- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
+
+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of
+ # https://github.com/pantsbuild/pants/issues/6062
+ env_marker = "python_version>='2.7' and python_version<'3'"
+
+ requirement = PythonRequirement(requirement="pantsbuild.pants=={version} ; {env_marker}"
+ .format(version=pants_version(), env_marker=env_marker))
+
self._parse_context.create_object('python_requirement_library',
name=name,
requirements=[requirement])
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py\n--- a/src/python/pants/backend/python/pants_requirement.py\n+++ b/src/python/pants/backend/python/pants_requirement.py\n@@ -32,7 +32,14 @@\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n+\n+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n+ # https://github.com/pantsbuild/pants/issues/6062\n+ env_marker = \"python_version>='2.7' and python_version<'3'\"\n+\n+ requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n+ .format(version=pants_version(), env_marker=env_marker))\n+\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "issue": "`pants_requirement` should include the `; python_version<'3'` environment marker\nUntil pants can run on python 3, the `pants_requirement` should add an environment marker to restict the python environment it applies to. After pants only supports `>=3.5` a change should be made to the environment marker.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. 
Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n\n # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n # https://github.com/pantsbuild/pants/issues/6062\n env_marker = \"python_version>='2.7' and python_version<'3'\"\n\n requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n .format(version=pants_version(), env_marker=env_marker))\n\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}]}
| 733 | 253 |
gh_patches_debug_19531
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-733
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adapt page usage to the new refarm pages interface
At https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.
Adapt site code to the new interface.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/context.py`
Content:
```
1 from functools import partial
2
3 from django.conf import settings
4 from django.shortcuts import get_object_or_404
5
6 from catalog import newcontext
7 from images.models import Image
8 from pages import models as pages_models, newcontext as pages_newcontext
9 from shopelectro import models, request_data
10
11
12 # @todo #255:60m Improve `SortingOption` interface.
13 # Now it's located in context and this is wrong.
14 # Maybe refactor `CATEGORY_SORTING_OPTIONS`.
15 class SortingOption:
16 def __init__(self, index=0):
17 options = settings.CATEGORY_SORTING_OPTIONS[index]
18 self.label = options['label']
19 self.field = options['field']
20 self.direction = options['direction']
21
22 @property
23 def directed_field(self):
24 return self.direction + self.field
25
26
27 class Page(newcontext.Context):
28
29 def __init__(self, page, tags: newcontext.Tags):
30 self._page = page
31 self._tags = tags
32
33 def context(self):
34 def template_context(page, tag_titles, tags):
35 return {
36 'page': page,
37 'tag_titles': tag_titles,
38 'tags': tags,
39 }
40
41 tags_qs = self._tags.qs()
42 self._page.get_template_render_context = partial(
43 template_context, self._page, tags_qs.as_title(), tags_qs
44 )
45
46 return {
47 'page': self._page,
48 }
49
50
51 class Catalog(newcontext.Context):
52
53 def __init__(self, request_data_: request_data.Catalog):
54 self.request_data = request_data_
55
56 @property
57 def page(self):
58 return get_object_or_404(
59 pages_models.ModelPage,
60 slug=self.request_data.slug
61 )
62
63 @property
64 def category(self):
65 return self.page.model
66
67 def context(self) -> dict:
68 all_tags = newcontext.Tags(models.Tag.objects.all())
69
70 selected_tags = newcontext.tags.ParsedTags(
71 tags=all_tags,
72 raw_tags=self.request_data.tags,
73 )
74 if self.request_data.tags:
75 selected_tags = newcontext.tags.Checked404Tags(selected_tags)
76
77 products = (
78 models.Product.objects.active()
79 .filter_descendants(self.category)
80 .tagged_or_all(selected_tags.qs())
81 .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)
82 )
83
84 """
85 We have to use separated variable for pagination.
86
87 Because paginated QuerySet can not used as QuerySet.
88 It's not the most strong place of Django ORM, of course.
89 :return: ProductsContext with paginated QuerySet inside
90 """
91 # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.
92 # Rename Checked404Tags to ExistingOr404.
93 paginated = newcontext.products.PaginatedProducts(
94 products=products,
95 url=self.request_data.request.path,
96 page_number=self.request_data.pagination_page_number,
97 per_page=self.request_data.pagination_per_page,
98 )
99
100 images = newcontext.products.ProductImages(paginated.products, Image.objects.all())
101 brands = newcontext.products.ProductBrands(paginated.products, all_tags)
102 grouped_tags = newcontext.tags.GroupedTags(
103 tags=newcontext.tags.TagsByProducts(all_tags, products)
104 )
105 page = Page(self.page, selected_tags)
106 category = newcontext.category.Context(self.category)
107 params = {
108 'view_type': self.request_data.get_view_type(),
109 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),
110 'limits': settings.CATEGORY_STEP_MULTIPLIERS,
111 'sort': self.request_data.sorting_index,
112 }
113
114 return {
115 **params,
116 **pages_newcontext.Contexts([
117 page, category, paginated,
118 images, brands, grouped_tags
119 ]).context()
120 }
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/context.py b/shopelectro/context.py
--- a/shopelectro/context.py
+++ b/shopelectro/context.py
@@ -1,5 +1,3 @@
-from functools import partial
-
from django.conf import settings
from django.shortcuts import get_object_or_404
@@ -31,18 +29,15 @@
self._tags = tags
def context(self):
- def template_context(page, tag_titles, tags):
- return {
- 'page': page,
- 'tag_titles': tag_titles,
- 'tags': tags,
- }
-
tags_qs = self._tags.qs()
- self._page.get_template_render_context = partial(
- template_context, self._page, tags_qs.as_title(), tags_qs
- )
-
+ # use dirty patch here, because it's the most simple method
+ # to make shared templates work.
+ # For example `templates/layout/metadata.html`.
+ self._page.display = {
+ 'page': self._page,
+ 'tag_titles': tags_qs.as_title(),
+ 'tags': tags_qs,
+ }
return {
'page': self._page,
}
|
{"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -1,5 +1,3 @@\n-from functools import partial\n-\n from django.conf import settings\n from django.shortcuts import get_object_or_404\n \n@@ -31,18 +29,15 @@\n self._tags = tags\n \n def context(self):\n- def template_context(page, tag_titles, tags):\n- return {\n- 'page': page,\n- 'tag_titles': tag_titles,\n- 'tags': tags,\n- }\n-\n tags_qs = self._tags.qs()\n- self._page.get_template_render_context = partial(\n- template_context, self._page, tags_qs.as_title(), tags_qs\n- )\n-\n+ # use dirty patch here, because it's the most simple method\n+ # to make shared templates work.\n+ # For example `templates/layout/metadata.html`.\n+ self._page.display = {\n+ 'page': self._page,\n+ 'tag_titles': tags_qs.as_title(),\n+ 'tags': tags_qs,\n+ }\n return {\n 'page': self._page,\n }\n", "issue": "Adapt page usage to the new refarm pages interface\nAt https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.\r\n\r\nAdapt site code to the new interface.\r\n\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags 
to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n tags_qs = self._tags.qs()\n # use dirty patch here, because it's the most simple method\n # to make shared templates work.\n # For example `templates/layout/metadata.html`.\n self._page.display = {\n 'page': self._page,\n 'tag_titles': tags_qs.as_title(),\n 'tags': tags_qs,\n }\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n 
page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}]}
| 1,397 | 271 |
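The shopelectro patch above replaces a `functools.partial` callback with plain data: instead of attaching a deferred `get_template_render_context` to the page, the already-computed tag values are stored in a `page.display` dict that shared templates (for example `templates/layout/metadata.html`) can read directly. A minimal, self-contained sketch of that pattern — `SimplePage` and the sample tag values are placeholders, not shopelectro code:

```python
# Sketch of the "attach a display dict" pattern used in the patch.
# SimplePage and the tag values below are illustrative placeholders.

class SimplePage:
    """Stand-in for pages.models.ModelPage."""
    def __init__(self, name):
        self.name = name


def build_context(page, tag_titles, tags):
    # Instead of wiring a partial/callback onto the page, precompute the
    # values and store them where shared templates expect to find them.
    page.display = {
        'page': page,
        'tag_titles': tag_titles,
        'tags': tags,
    }
    return {'page': page}


page = SimplePage('Resistors')
context = build_context(page, '0.25 W, 1 kOhm', ['0.25 W', '1 kOhm'])
print(context['page'].display['tag_titles'])  # -> '0.25 W, 1 kOhm'
```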
gh_patches_debug_41380
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-462
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Jinja2 async integration
Since https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).
We need to update to only instrument at the point the `render_async` method becomes available.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/instruments/jinja2.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5
6 import wrapt
7
8 from scout_apm.core.tracked_request import TrackedRequest
9
10 try:
11 from jinja2 import Template
12 except ImportError: # pragma: no cover
13 Template = None
14
15 # The async_ module can only be shipped on Python 3.6+
16 try:
17 from scout_apm.async_.instruments.jinja2 import wrapped_render_async
18 except ImportError:
19 wrapped_render_async = None
20
21
22 logger = logging.getLogger(__name__)
23
24
25 have_patched_template_render = False
26 have_patched_template_render_async = False
27
28
29 def ensure_installed():
30 global have_patched_template_render
31 global have_patched_template_render_async
32
33 logger.info("Ensuring Jinja2 instrumentation is installed.")
34
35 if Template is None:
36 logger.info("Unable to import jinja2.Template")
37 return
38
39 if not have_patched_template_render:
40 try:
41 Template.render = wrapped_render(Template.render)
42 except Exception as exc:
43 logger.warning(
44 "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
45 )
46 else:
47 have_patched_template_render = True
48
49 if not have_patched_template_render_async and wrapped_render_async is not None:
50 try:
51 Template.render_async = wrapped_render_async(Template.render_async)
52 except Exception as exc:
53 logger.warning(
54 "Unable to instrument jinja2.Template.render_async: %r",
55 exc,
56 exc_info=exc,
57 )
58 else:
59 have_patched_template_render_async = True
60
61
62 @wrapt.decorator
63 def wrapped_render(wrapped, instance, args, kwargs):
64 tracked_request = TrackedRequest.instance()
65 span = tracked_request.start_span(operation="Template/Render")
66 span.tag("name", instance.name)
67 try:
68 return wrapped(*args, **kwargs)
69 finally:
70 tracked_request.stop_span()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py
--- a/src/scout_apm/instruments/jinja2.py
+++ b/src/scout_apm/instruments/jinja2.py
@@ -2,11 +2,17 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
+import sys
import wrapt
from scout_apm.core.tracked_request import TrackedRequest
+try:
+ from jinja2 import Environment
+except ImportError: # pragma: no cover
+ Environment = None
+
try:
from jinja2 import Template
except ImportError: # pragma: no cover
@@ -22,13 +28,14 @@
logger = logging.getLogger(__name__)
+have_patched_environment_init = False
have_patched_template_render = False
have_patched_template_render_async = False
def ensure_installed():
+ global have_patched_environment_init
global have_patched_template_render
- global have_patched_template_render_async
logger.info("Ensuring Jinja2 instrumentation is installed.")
@@ -36,27 +43,27 @@
logger.info("Unable to import jinja2.Template")
return
- if not have_patched_template_render:
+ if not have_patched_environment_init:
try:
- Template.render = wrapped_render(Template.render)
+ Environment.__init__ = wrapped_environment_init(Environment.__init__)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ "Unable to instrument jinja2.Environment.__init__: %r",
+ exc,
+ exc_info=exc,
)
else:
- have_patched_template_render = True
+ have_patched_environment_init = True
- if not have_patched_template_render_async and wrapped_render_async is not None:
+ if not have_patched_template_render:
try:
- Template.render_async = wrapped_render_async(Template.render_async)
+ Template.render = wrapped_render(Template.render)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render_async: %r",
- exc,
- exc_info=exc,
+ "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
)
else:
- have_patched_template_render_async = True
+ have_patched_template_render = True
@wrapt.decorator
@@ -68,3 +75,33 @@
return wrapped(*args, **kwargs)
finally:
tracked_request.stop_span()
+
+
[email protected]
+def wrapped_environment_init(wrapped, instance, args, kwargs):
+ """
+ Delayed wrapping of render_async(), since Template won't have this method
+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
+ done conditionally in Environment.__init__:
+ https://github.com/pallets/jinja/issues/765
+ """
+ global have_patched_template_render_async
+ result = wrapped(*args, **kwargs)
+
+ if (
+ wrapped_render_async is not None
+ and not have_patched_template_render_async
+ and "jinja2.asyncsupport" in sys.modules
+ ):
+ try:
+ Template.render_async = wrapped_render_async(Template.render_async)
+ except Exception as exc:
+ logger.warning(
+ "Unable to instrument jinja2.Template.render_async: %r",
+ exc,
+ exc_info=exc,
+ )
+ else:
+ have_patched_template_render_async = True
+
+ return result
|
{"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -2,11 +2,17 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n+import sys\n \n import wrapt\n \n from scout_apm.core.tracked_request import TrackedRequest\n \n+try:\n+ from jinja2 import Environment\n+except ImportError: # pragma: no cover\n+ Environment = None\n+\n try:\n from jinja2 import Template\n except ImportError: # pragma: no cover\n@@ -22,13 +28,14 @@\n logger = logging.getLogger(__name__)\n \n \n+have_patched_environment_init = False\n have_patched_template_render = False\n have_patched_template_render_async = False\n \n \n def ensure_installed():\n+ global have_patched_environment_init\n global have_patched_template_render\n- global have_patched_template_render_async\n \n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n \n@@ -36,27 +43,27 @@\n logger.info(\"Unable to import jinja2.Template\")\n return\n \n- if not have_patched_template_render:\n+ if not have_patched_environment_init:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Unable to instrument jinja2.Environment.__init__: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_environment_init = True\n \n- if not have_patched_template_render_async and wrapped_render_async is not None:\n+ if not have_patched_template_render:\n try:\n- Template.render_async = wrapped_render_async(Template.render_async)\n+ Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render_async: %r\",\n- exc,\n- exc_info=exc,\n+ \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n- have_patched_template_render_async = True\n+ have_patched_template_render = True\n \n \n @wrapt.decorator\n@@ -68,3 +75,33 @@\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n+\n+\[email protected]\n+def wrapped_environment_init(wrapped, instance, args, kwargs):\n+ \"\"\"\n+ Delayed wrapping of render_async(), since Template won't have this method\n+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n+ done conditionally in Environment.__init__:\n+ https://github.com/pallets/jinja/issues/765\n+ \"\"\"\n+ global have_patched_template_render_async\n+ result = wrapped(*args, **kwargs)\n+\n+ if (\n+ wrapped_render_async is not None\n+ and not have_patched_template_render_async\n+ and \"jinja2.asyncsupport\" in sys.modules\n+ ):\n+ try:\n+ Template.render_async = wrapped_render_async(Template.render_async)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Unable to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n+ )\n+ else:\n+ have_patched_template_render_async = True\n+\n+ return result\n", "issue": "Fix Jinja2 async integration\nSince https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).\r\n\r\nWe need to update to only instrument at the point the `render_async` method becomes available.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, 
division, print_function, unicode_literals\n\nimport logging\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_template_render\n global have_patched_template_render_async\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n if not have_patched_template_render_async and wrapped_render_async is not None:\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/jinja2.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_environment_init\n global have_patched_template_render\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_environment_init:\n try:\n Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = 
tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\[email protected]\ndef wrapped_environment_init(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}]}
| 893 | 836 |
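The essential trick in the Jinja2 fix above is delayed patching: `Template.render_async` does not exist until `jinja2.asyncsupport` has been imported, which Jinja2 2.11+ does lazily from `Environment.__init__`, so the instrumentation is installed from a wrapped `__init__` instead of at `ensure_installed()` time. Below is a dependency-free sketch of that pattern; the classes are placeholders, and where the real code uses `wrapt` and a `sys.modules` check, this sketch uses plain functions and `hasattr`:

```python
# Sketch of "patch a method only once it exists", using placeholder classes.
import functools


class Template:
    """Placeholder: render_async is added later, like in Jinja2 >= 2.11."""
    def render(self):
        return "sync"


class Environment:
    """Placeholder: initialising it is what adds render_async."""
    def __init__(self, enable_async=False):
        if enable_async and not hasattr(Template, "render_async"):
            Template.render_async = lambda self: "async"


_patched_async = False


def instrument_render_async(init):
    @functools.wraps(init)
    def wrapper(self, *args, **kwargs):
        global _patched_async
        init(self, *args, **kwargs)
        # Only now can render_async exist, so only now do we wrap it.
        if not _patched_async and hasattr(Template, "render_async"):
            original = Template.render_async

            def traced(self, *a, **kw):
                print("tracing render_async")
                return original(self, *a, **kw)

            Template.render_async = traced
            _patched_async = True
    return wrapper


Environment.__init__ = instrument_render_async(Environment.__init__)

Environment(enable_async=True)    # triggers the delayed patch
print(Template().render_async())  # prints "tracing render_async", then "async"
```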
gh_patches_debug_2580
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-4127
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Setuptools 69.0.0 breaks Astropy's setup
### setuptools version
setuptools==69.0.0
### Python version
3.12
### OS
Ubuntu
### Additional environment information
_No response_
### Description
About 15h ago, Astropy's CI started failing to build with
```
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util'
```
This seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).
Nonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?
### Expected behavior
a deprecation warning instead of a hard error ?
### How to Reproduce
```shell
$ python -c "from setuptools.dep_util import newer_group"
```
### Output
```console
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/dep_util.py`
Content:
```
1 import warnings
2
3 from ._distutils import _modified
4
5
6 def __getattr__(name):
7 if name not in ['newer_pairwise_group']:
8 raise AttributeError(name)
9 warnings.warn(
10 "dep_util is Deprecated. Use functions from setuptools.modified instead.",
11 DeprecationWarning,
12 stacklevel=2,
13 )
14 return getattr(_modified, name)
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py
--- a/setuptools/dep_util.py
+++ b/setuptools/dep_util.py
@@ -4,7 +4,7 @@
def __getattr__(name):
- if name not in ['newer_pairwise_group']:
+ if name not in ['newer_group', 'newer_pairwise_group']:
raise AttributeError(name)
warnings.warn(
"dep_util is Deprecated. Use functions from setuptools.modified instead.",
|
{"golden_diff": "diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py\n--- a/setuptools/dep_util.py\n+++ b/setuptools/dep_util.py\n@@ -4,7 +4,7 @@\n \n \n def __getattr__(name):\n- if name not in ['newer_pairwise_group']:\n+ if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n", "issue": "[BUG] Setuptools 69.0.0 breaks Astropy's setup\n### setuptools version\n\nsetuptools==69.0.0\n\n### Python version\n\n3.12\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nAbout 15h ago, Astropy's CI started failing to build with\r\n```\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util'\r\n```\r\nThis seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).\r\nNonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?\n\n### Expected behavior\n\na deprecation warning instead of a hard error ?\n\n### How to Reproduce\n\n```shell\r\n$ python -c \"from setuptools.dep_util import newer_group\"\r\n```\n\n### Output\n\n```console\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)\r\n```\r\n\n", "before_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}], "after_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}]}
| 638 | 107 |
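The one-line fix above works because `setuptools/dep_util.py` relies on a module-level `__getattr__` (PEP 562): names in the allow-list are forwarded to the new module with a `DeprecationWarning`, while everything else raises `AttributeError`, which `from … import …` turns into the `ImportError` Astropy saw — so `newer_group` simply had to be added to the list. A self-contained sketch of such a shim, with illustrative names standing in for `setuptools.modified`:

```python
# legacy_shim.py -- minimal sketch of a PEP 562 deprecation shim,
# in the style of setuptools/dep_util.py. All names are illustrative.
import types
import warnings

# Stand-in for the new module (setuptools.modified in the real code).
_modern = types.SimpleNamespace(
    newer_group=lambda sources, target: False,
    newer_pairwise_group=lambda pairs: ([], []),
)

_FORWARDED = ['newer_group', 'newer_pairwise_group']


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): consulted only for missing names.
    if name not in _FORWARDED:
        raise AttributeError(name)
    warnings.warn(
        "this module is deprecated; import from the new module instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return getattr(_modern, name)
```

With `'newer_group'` left out of `_FORWARDED`, `from legacy_shim import newer_group` fails with `ImportError: cannot import name 'newer_group'` — exactly the failure reported in the issue.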
gh_patches_debug_23752
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-3136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transition to GA: databox
Command module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.
Please check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/databox/azext_databox/commands.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 # pylint: disable=line-too-long
7 # pylint: disable=too-many-lines
8 # pylint: disable=too-many-statements
9 # pylint: disable=too-many-locals
10 from azext_databox._validators import validate_create_input_parameters
11 from azure.cli.core.commands import CliCommandType
12
13
14 def load_command_table(self, _):
15
16 from azext_databox._client_factory import cf_jobs
17 databox_jobs = CliCommandType(
18 operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
19 client_factory=cf_jobs)
20 with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
21 g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
22 g.custom_command('update', 'update_databox_job')
23 g.custom_command('delete', 'delete_databox_job', confirmation=True)
24 g.custom_show_command('show', 'get_databox_job')
25 g.custom_command('list', 'list_databox_job')
26 g.custom_command('cancel', 'cancel_databox_job', confirmation=True)
27 g.custom_command('list-credentials', 'list_credentials_databox_job')
28
```
Path: `src/databox/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.1.0'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 3',
29 'Programming Language :: Python :: 3.4',
30 'Programming Language :: Python :: 3.5',
31 'Programming Language :: Python :: 3.6',
32 'Programming Language :: Python :: 3.7',
33 'Programming Language :: Python :: 3.8',
34 'License :: OSI Approved :: MIT License',
35 ]
36
37 # TODO: Add any additional SDK dependencies here
38 DEPENDENCIES = []
39
40 with open('README.md', 'r', encoding='utf-8') as f:
41 README = f.read()
42 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
43 HISTORY = f.read()
44
45 setup(
46 name='databox',
47 version=VERSION,
48 description='Microsoft Azure Command-Line Tools DataBox Extension',
49 # TODO: Update author and email, if applicable
50 author='Microsoft Corporation',
51 author_email='[email protected]',
52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',
53 long_description=README + '\n\n' + HISTORY,
54 license='MIT',
55 classifiers=CLASSIFIERS,
56 packages=find_packages(),
57 install_requires=DEPENDENCIES,
58 package_data={'azext_databox': ['azext_metadata.json']},
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py
--- a/src/databox/azext_databox/commands.py
+++ b/src/databox/azext_databox/commands.py
@@ -17,7 +17,7 @@
databox_jobs = CliCommandType(
operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
client_factory=cf_jobs)
- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:
g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
g.custom_command('update', 'update_databox_job')
g.custom_command('delete', 'delete_databox_job', confirmation=True)
diff --git a/src/databox/setup.py b/src/databox/setup.py
--- a/src/databox/setup.py
+++ b/src/databox/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.0'
+VERSION = '0.1.1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
{"golden_diff": "diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py\n--- a/src/databox/azext_databox/commands.py\n+++ b/src/databox/azext_databox/commands.py\n@@ -17,7 +17,7 @@\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\ndiff --git a/src/databox/setup.py b/src/databox/setup.py\n--- a/src/databox/setup.py\n+++ b/src/databox/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.0'\n+VERSION = '0.1.1'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: databox\nCommand module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}]}
| 1,315 | 347 |
gh_patches_debug_37030
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2713
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
yum.repos.d parser is too tolerant
The yum repos d parser will accept almost anything, including binary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/yum_repos_d.py`
Content:
```
1 from .. import Parser, parser, get_active_lines, LegacyItemAccess
2 from insights.specs import Specs
3
4
5 @parser(Specs.yum_repos_d)
6 class YumReposD(LegacyItemAccess, Parser):
7 """Class to parse the files under ``yum.repos.d`` """
8
9 def get(self, key):
10 return self.data.get(key)
11
12 def parse_content(self, content):
13 '''
14 Return an object contains a dict.
15 {
16 "rhel-source": {
17 "gpgcheck": "1",
18 "gpgkey": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
19 "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak"]
20 "enabled": "0",
21 "name": "Red Hat Enterprise Linux $releasever - $basearch - Source",
22 "baseurl": "ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/"
23 }
24 }
25 ----------------------------------------------------
26 There are several files in 'yum.repos.d' directory, which have the same
27 format. For example:
28 --------one of the files : rhel-source.repo---------
29 [rhel-source]
30 name=Red Hat Enterprise Linux $releasever - $basearch - Source
31 baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/
32 enabled=0
33 gpgcheck=1
34 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
35 file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
36 '''
37 repos_dict = {}
38 section_dict = {}
39 key = None
40 for line in get_active_lines(content):
41 if line.startswith('['):
42 section_dict = {}
43 repos_dict[line[1:-1]] = section_dict
44 elif '=' in line:
45 key, value = [s.strip() for s in line.split("=", 1)]
46 if key in ('baseurl', 'gpgkey'):
47 section_dict[key] = [v.strip() for v in value.split(",")]
48 else:
49 section_dict[key] = value
50 else:
51 if key and isinstance(section_dict[key], list):
52 section_dict[key].extend(v.strip() for v in line.split(","))
53 # Otherwise ignore line if no key or we don't store multiple values
54 self.data = repos_dict
55
56 def __iter__(self):
57 for repo in self.data:
58 yield repo
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py
--- a/insights/parsers/yum_repos_d.py
+++ b/insights/parsers/yum_repos_d.py
@@ -1,5 +1,39 @@
-from .. import Parser, parser, get_active_lines, LegacyItemAccess
+import re
+import string
+
+from .. import Parser, parser, LegacyItemAccess
from insights.specs import Specs
+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,
+ skip_none, String, WithIndent, WS)
+
+
+header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
+sep_chars = set(":=")
+key_chars = header_chars - sep_chars - set(" ")
+value_chars = set(string.printable) - set("\n\r")
+
+LeftEnd = WS >> Char("[") << WS
+RightEnd = WS >> Char("]") << WS
+Header = LeftEnd >> String(header_chars) << RightEnd
+Key = WS >> String(key_chars) << WS
+Sep = InSet(sep_chars)
+Value = WS >> HangingString(value_chars)
+KVPair = WithIndent(Key + Opt(Sep >> Value))
+Comment = WS >> (OneLineComment("#") | OneLineComment(";")).map(lambda x: None)
+
+Line = Comment | KVPair.map(tuple)
+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)
+Doc = Many(Comment | Sect).map(skip_none).map(dict)
+Top = Doc << WS << EOF
+
+
+def parse_yum_repos(content):
+ doc = Top(content)
+ for k, v in doc.items():
+ for special in ("baseurl", "gpgkey"):
+ if special in v:
+ v[special] = [i.strip() for i in re.split(",| ", v[special])]
+ return doc
@parser(Specs.yum_repos_d)
@@ -10,7 +44,7 @@
return self.data.get(key)
def parse_content(self, content):
- '''
+ """
Return an object contains a dict.
{
"rhel-source": {
@@ -33,25 +67,8 @@
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
- '''
- repos_dict = {}
- section_dict = {}
- key = None
- for line in get_active_lines(content):
- if line.startswith('['):
- section_dict = {}
- repos_dict[line[1:-1]] = section_dict
- elif '=' in line:
- key, value = [s.strip() for s in line.split("=", 1)]
- if key in ('baseurl', 'gpgkey'):
- section_dict[key] = [v.strip() for v in value.split(",")]
- else:
- section_dict[key] = value
- else:
- if key and isinstance(section_dict[key], list):
- section_dict[key].extend(v.strip() for v in line.split(","))
- # Otherwise ignore line if no key or we don't store multiple values
- self.data = repos_dict
+ """
+ self.data = parse_yum_repos("\n".join(content))
def __iter__(self):
for repo in self.data:
|
{"golden_diff": "diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py\n--- a/insights/parsers/yum_repos_d.py\n+++ b/insights/parsers/yum_repos_d.py\n@@ -1,5 +1,39 @@\n-from .. import Parser, parser, get_active_lines, LegacyItemAccess\n+import re\n+import string\n+\n+from .. import Parser, parser, LegacyItemAccess\n from insights.specs import Specs\n+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n+ skip_none, String, WithIndent, WS)\n+\n+\n+header_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\n+sep_chars = set(\":=\")\n+key_chars = header_chars - sep_chars - set(\" \")\n+value_chars = set(string.printable) - set(\"\\n\\r\")\n+\n+LeftEnd = WS >> Char(\"[\") << WS\n+RightEnd = WS >> Char(\"]\") << WS\n+Header = LeftEnd >> String(header_chars) << RightEnd\n+Key = WS >> String(key_chars) << WS\n+Sep = InSet(sep_chars)\n+Value = WS >> HangingString(value_chars)\n+KVPair = WithIndent(Key + Opt(Sep >> Value))\n+Comment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n+\n+Line = Comment | KVPair.map(tuple)\n+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\n+Doc = Many(Comment | Sect).map(skip_none).map(dict)\n+Top = Doc << WS << EOF\n+\n+\n+def parse_yum_repos(content):\n+ doc = Top(content)\n+ for k, v in doc.items():\n+ for special in (\"baseurl\", \"gpgkey\"):\n+ if special in v:\n+ v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n+ return doc\n \n \n @parser(Specs.yum_repos_d)\n@@ -10,7 +44,7 @@\n return self.data.get(key)\n \n def parse_content(self, content):\n- '''\n+ \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n@@ -33,25 +67,8 @@\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n- '''\n- repos_dict = {}\n- section_dict = {}\n- key = None\n- for line in get_active_lines(content):\n- if line.startswith('['):\n- section_dict = {}\n- repos_dict[line[1:-1]] = section_dict\n- elif '=' in line:\n- key, value = [s.strip() for s in line.split(\"=\", 1)]\n- if key in ('baseurl', 'gpgkey'):\n- section_dict[key] = [v.strip() for v in value.split(\",\")]\n- else:\n- section_dict[key] = value\n- else:\n- if key and isinstance(section_dict[key], list):\n- section_dict[key].extend(v.strip() for v in line.split(\",\"))\n- # Otherwise ignore line if no key or we don't store multiple values\n- self.data = repos_dict\n+ \"\"\"\n+ self.data = parse_yum_repos(\"\\n\".join(content))\n \n def __iter__(self):\n for repo in self.data:\n", "issue": "yum.repos.d parser is too tolerant\nThe yum repos d parser will accept almost anything, including binary.\n", "before_files": [{"content": "from .. 
import Parser, parser, get_active_lines, LegacyItemAccess\nfrom insights.specs import Specs\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n '''\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n '''\n repos_dict = {}\n section_dict = {}\n key = None\n for line in get_active_lines(content):\n if line.startswith('['):\n section_dict = {}\n repos_dict[line[1:-1]] = section_dict\n elif '=' in line:\n key, value = [s.strip() for s in line.split(\"=\", 1)]\n if key in ('baseurl', 'gpgkey'):\n section_dict[key] = [v.strip() for v in value.split(\",\")]\n else:\n section_dict[key] = value\n else:\n if key and isinstance(section_dict[key], list):\n section_dict[key].extend(v.strip() for v in line.split(\",\"))\n # Otherwise ignore line if no key or we don't store multiple values\n self.data = repos_dict\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}], "after_files": [{"content": "import re\nimport string\n\nfrom .. 
import Parser, parser, LegacyItemAccess\nfrom insights.specs import Specs\nfrom insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n skip_none, String, WithIndent, WS)\n\n\nheader_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\nsep_chars = set(\":=\")\nkey_chars = header_chars - sep_chars - set(\" \")\nvalue_chars = set(string.printable) - set(\"\\n\\r\")\n\nLeftEnd = WS >> Char(\"[\") << WS\nRightEnd = WS >> Char(\"]\") << WS\nHeader = LeftEnd >> String(header_chars) << RightEnd\nKey = WS >> String(key_chars) << WS\nSep = InSet(sep_chars)\nValue = WS >> HangingString(value_chars)\nKVPair = WithIndent(Key + Opt(Sep >> Value))\nComment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n\nLine = Comment | KVPair.map(tuple)\nSect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\nDoc = Many(Comment | Sect).map(skip_none).map(dict)\nTop = Doc << WS << EOF\n\n\ndef parse_yum_repos(content):\n doc = Top(content)\n for k, v in doc.items():\n for special in (\"baseurl\", \"gpgkey\"):\n if special in v:\n v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n return doc\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n \"\"\"\n self.data = parse_yum_repos(\"\\n\".join(content))\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}]}
| 952 | 801 |
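The rewritten parser above is stricter because it spells out a grammar with `insights.parsr` combinators — a bracketed section header, `key = value` pairs with indented continuation lines, and `#`/`;` comments — and `Top(content)` fails on anything that does not match (binary data included) rather than silently accepting it. A usage sketch, assuming insights-core is installed; the repo text is the `rhel-source.repo` example from the docstring above:

```python
# Usage sketch for the new parser; requires the insights-core package.
from insights.parsers.yum_repos_d import parse_yum_repos

REPO_TEXT = """\
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/
enabled=0
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
       file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
"""

repos = parse_yum_repos(REPO_TEXT)
print(repos["rhel-source"]["enabled"])  # "0"
print(repos["rhel-source"]["gpgkey"])   # list containing both key URLs

# Input that is not INI-like (e.g. binary junk) now raises an exception
# from the parser instead of being half-accepted:
try:
    parse_yum_repos("\x00\x01\x02 not a repo file")
except Exception as exc:
    print("rejected:", type(exc).__name__)
```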
gh_patches_debug_9034
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1029
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Staff users should see the public view of results by default
Staff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask "omg you see more stuff here why is that"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/results/views.py`
Content:
```
1 from collections import OrderedDict, namedtuple
2
3 from django.core.exceptions import PermissionDenied
4 from django.shortcuts import get_object_or_404, render
5 from django.contrib.auth.decorators import login_required
6
7 from evap.evaluation.models import Semester, Degree, Contribution
8 from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY
9
10
11 @login_required
12 def index(request):
13 semesters = Semester.get_all_with_published_courses()
14
15 return render(request, "results_index.html", dict(semesters=semesters))
16
17
18 @login_required
19 def semester_detail(request, semester_id):
20 semester = get_object_or_404(Semester, id=semester_id)
21 if request.user.is_reviewer:
22 courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees"))
23 else:
24 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
25
26 courses = [course for course in courses if course.can_user_see_course(request.user)]
27
28 # Annotate each course object with its grades.
29 for course in courses:
30 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
31
32 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
33
34 courses_by_degree = OrderedDict()
35 for degree in Degree.objects.all():
36 courses_by_degree[degree] = CourseTuple([], [])
37 for course in courses:
38 if course.is_single_result:
39 for degree in course.degrees.all():
40 section = calculate_results(course)[0]
41 result = section.results[0]
42 courses_by_degree[degree].single_results.append((course, result))
43 else:
44 for degree in course.degrees.all():
45 courses_by_degree[degree].courses.append(course)
46
47 template_data = dict(semester=semester, courses_by_degree=courses_by_degree)
48 return render(request, "results_semester_detail.html", template_data)
49
50
51 @login_required
52 def course_detail(request, semester_id, course_id):
53 semester = get_object_or_404(Semester, id=semester_id)
54 course = get_object_or_404(semester.course_set, id=course_id, semester=semester)
55
56 if not course.can_user_see_results(request.user):
57 raise PermissionDenied
58
59 sections = calculate_results(course)
60
61 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
62
63 represented_users = list(request.user.represented_users.all())
64 represented_users.append(request.user)
65
66 # filter text answers
67 for section in sections:
68 results = []
69 for result in section.results:
70 if isinstance(result, TextResult):
71 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
72 if answers:
73 results.append(TextResult(question=result.question, answers=answers))
74 else:
75 results.append(result)
76 section.results[:] = results
77
78 # remove empty sections
79 sections = [section for section in sections if section.results]
80
81 # group by contributor
82 course_sections = []
83 contributor_sections = OrderedDict()
84 for section in sections:
85 if not section.results:
86 continue
87 if section.contributor is None:
88 course_sections.append(section)
89 else:
90 contributor_sections.setdefault(section.contributor,
91 {'total_votes': 0, 'sections': []})['sections'].append(section)
92
93 # Sum up all Sections for this contributor.
94 # If section is not a RatingResult:
95 # Add 1 as we assume it is a TextResult or something similar that should be displayed.
96 contributor_sections[section.contributor]['total_votes'] +=\
97 sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
98
99 # Show a warning if course is still in evaluation (for reviewer preview).
100 evaluation_warning = course.state != 'published'
101
102 # Results for a course might not be visible because there are not enough answers
103 # but it can still be "published" e.g. to show the comment results to contributors.
104 # Users who can open the results page see a warning message in this case.
105 sufficient_votes_warning = not course.can_publish_grades
106
107 show_grades = request.user.is_reviewer or course.can_publish_grades
108
109 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
110
111 template_data = dict(
112 course=course,
113 course_sections=course_sections,
114 contributor_sections=contributor_sections,
115 evaluation_warning=evaluation_warning,
116 sufficient_votes_warning=sufficient_votes_warning,
117 show_grades=show_grades,
118 reviewer=request.user.is_reviewer,
119 contributor=course.is_user_contributor_or_delegate(request.user),
120 can_download_grades=request.user.can_download_grades,
121 public_view=public_view)
122 return render(request, "results_course_detail.html", template_data)
123
124
125 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
126 if public_view:
127 return False
128 if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:
129 return False
130 if user.is_reviewer:
131 return True
132
133 contributor = text_answer.contribution.contributor
134
135 if text_answer.is_private:
136 return contributor == user
137
138 if text_answer.is_published:
139 if text_answer.contribution.responsible:
140 return contributor == user or user in contributor.delegates.all()
141
142 if contributor in represented_users:
143 return True
144 if text_answer.contribution.course.contributions.filter(
145 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
146 return True
147 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
148 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
149 return True
150
151 return False
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -58,7 +58,10 @@
sections = calculate_results(course)
- public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
+ if request.user.is_staff or request.user.is_reviewer:
+ public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.
+ else:
+ public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
|
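An illustrative sketch, not drawn from the record itself: the two defaults introduced by the patch above, pulled into a plain function so the truth table is easy to check. Here `is_privileged` stands in for the patch's `request.user.is_staff or request.user.is_reviewer`, and `query_value` for `request.GET.get('public_view')`, with `None` meaning the parameter was absent.

```python
def resolve_public_view(is_privileged, query_value):
    # Privileged users default to the public view unless they explicitly opt out.
    if is_privileged:
        return query_value != 'false'
    # Everyone else keeps the old behaviour: own view unless explicitly enabled.
    return query_value == 'true'

assert resolve_public_view(True, None) is True       # staff/reviewer, no parameter -> public view
assert resolve_public_view(True, 'false') is False   # staff/reviewer opts into own view
assert resolve_public_view(False, None) is False     # regular user, no parameter -> own view
assert resolve_public_view(False, 'true') is True    # regular user opts into public view
```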
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -58,7 +58,10 @@\n \n sections = calculate_results(course)\n \n- public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n+ if request.user.is_staff or request.user.is_reviewer:\n+ public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n+ else:\n+ public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n \n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n", "issue": "Staff users should see the public view of results by default\nStaff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask \"omg you see more stuff here why is that\"\n", "before_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text 
answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}], "after_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import 
Semester, Degree, Contribution\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n if request.user.is_staff or request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if 
isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
| 1,947 | 181 |
gh_patches_debug_28895
|
rasdani/github-patches
|
git_diff
|
translate__pootle-3780
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Legal pages don't remember your acceptance of the agreement
This is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/staticpages/managers.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.db.models import F, Manager
11
12
13 class PageManager(Manager):
14
15 def live(self, user=None, **kwargs):
16 """Filters active (live) pages.
17
18 :param user: Current active user. If omitted or the user doesn't
19 have administration privileges, only active pages will be
20 returned.
21 """
22 if user is not None and user.is_superuser:
23 return self.get_queryset()
24
25 return self.get_queryset().filter(active=True)
26
27 def pending_user_agreement(self, user, **kwargs):
28 """Filters active pages where the given `user` has pending
29 agreements.
30 """
31 # FIXME: This should be a method exclusive to a LegalPage manager
32 return self.live().exclude(
33 agreement__user=user,
34 modified_on__lt=F('agreement__agreed_on'),
35 ).distinct()
36
```
Path: `pootle/apps/pootle_misc/context_processors.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.conf import settings
11 from django.utils import translation
12
13 from pootle import __version__
14 from pootle_language.models import Language
15 from pootle_project.models import Project
16 from staticpages.models import LegalPage
17
18
19 def _agreement_context(request):
20 """Returns whether the agreement box should be displayed or not."""
21 request_path = request.META['PATH_INFO']
22 nocheck = filter(lambda x: request_path.startswith(x),
23 settings.LEGALPAGE_NOCHECK_PREFIXES)
24
25 if (request.user.is_authenticated() and not nocheck and
26 LegalPage.objects.pending_user_agreement(request.user).exists()):
27 return True
28
29 return False
30
31
32 def pootle_context(request):
33 """Exposes settings to templates."""
34 #FIXME: maybe we should expose relevant settings only?
35 return {
36 'settings': {
37 'TITLE': settings.TITLE,
38 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
39 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,
40 'SCRIPT_NAME': settings.SCRIPT_NAME,
41 'POOTLE_VERSION': __version__,
42 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,
43 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,
44 'DEBUG': settings.DEBUG,
45 },
46 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,
47 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),
48 'ALL_PROJECTS': Project.objects.cached_dict(request.user),
49 'display_agreement': _agreement_context(request),
50 }
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py
--- a/pootle/apps/pootle_misc/context_processors.py
+++ b/pootle/apps/pootle_misc/context_processors.py
@@ -23,7 +23,7 @@
settings.LEGALPAGE_NOCHECK_PREFIXES)
if (request.user.is_authenticated() and not nocheck and
- LegalPage.objects.pending_user_agreement(request.user).exists()):
+ LegalPage.objects.has_pending_agreement(request.user)):
return True
return False
diff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py
--- a/pootle/apps/staticpages/managers.py
+++ b/pootle/apps/staticpages/managers.py
@@ -24,12 +24,24 @@
return self.get_queryset().filter(active=True)
+ def has_pending_agreement(self, user):
+ agreements = self.pending_user_agreement(user)
+ return len(list(agreements)) > 0
+
def pending_user_agreement(self, user, **kwargs):
"""Filters active pages where the given `user` has pending
agreements.
"""
# FIXME: This should be a method exclusive to a LegalPage manager
- return self.live().exclude(
- agreement__user=user,
- modified_on__lt=F('agreement__agreed_on'),
- ).distinct()
+ return self.raw('''
+ SELECT DISTINCT staticpages_legalpage.id
+ FROM staticpages_legalpage
+ WHERE (staticpages_legalpage.active = TRUE
+ AND NOT (staticpages_legalpage.id IN
+ (SELECT A.document_id
+ FROM staticpages_legalpage AS LP
+ INNER JOIN staticpages_agreement AS A
+ ON LP.id = A.document_id
+ WHERE A.user_id = %s AND
+ A.agreed_on > LP.modified_on)))
+ ''', [user.id])
|
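An illustrative sketch, not drawn from the record itself: the predicate that the raw SQL in the patch encodes, written over plain dictionaries (assuming an agreement carries `document_id`, `user_id` and `agreed_on`). A page stays pending for a user unless that user agreed to it after its last modification.

```python
from datetime import datetime

def pending_pages(pages, agreements, user_id):
    # One agreement timestamp per document for this user (sketch assumes at most one).
    agreed = {a['document_id']: a['agreed_on']
              for a in agreements if a['user_id'] == user_id}
    # Active pages with no agreement newer than their last modification stay pending.
    return [p for p in pages
            if p['active'] and not (p['id'] in agreed and agreed[p['id']] > p['modified_on'])]

pages = [{'id': 1, 'active': True, 'modified_on': datetime(2015, 5, 1)}]
agreements = [{'document_id': 1, 'user_id': 7, 'agreed_on': datetime(2015, 6, 1)}]
assert pending_pages(pages, agreements, user_id=7) == []      # agreed after the last edit
assert pending_pages(pages, agreements, user_id=8) == pages   # never agreed -> still pending
```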
{"golden_diff": "diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py\n--- a/pootle/apps/pootle_misc/context_processors.py\n+++ b/pootle/apps/pootle_misc/context_processors.py\n@@ -23,7 +23,7 @@\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n \n if (request.user.is_authenticated() and not nocheck and\n- LegalPage.objects.pending_user_agreement(request.user).exists()):\n+ LegalPage.objects.has_pending_agreement(request.user)):\n return True\n \n return False\ndiff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py\n--- a/pootle/apps/staticpages/managers.py\n+++ b/pootle/apps/staticpages/managers.py\n@@ -24,12 +24,24 @@\n \n return self.get_queryset().filter(active=True)\n \n+ def has_pending_agreement(self, user):\n+ agreements = self.pending_user_agreement(user)\n+ return len(list(agreements)) > 0\n+\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n- return self.live().exclude(\n- agreement__user=user,\n- modified_on__lt=F('agreement__agreed_on'),\n- ).distinct()\n+ return self.raw('''\n+ SELECT DISTINCT staticpages_legalpage.id\n+ FROM staticpages_legalpage\n+ WHERE (staticpages_legalpage.active = TRUE\n+ AND NOT (staticpages_legalpage.id IN\n+ (SELECT A.document_id\n+ FROM staticpages_legalpage AS LP\n+ INNER JOIN staticpages_agreement AS A\n+ ON LP.id = A.document_id\n+ WHERE A.user_id = %s AND\n+ A.agreed_on > LP.modified_on)))\n+ ''', [user.id])\n", "issue": "Legal pages don't remember your acceptance of the agreemenet\nThis is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import F, Manager\n\n\nclass PageManager(Manager):\n\n def live(self, user=None, **kwargs):\n \"\"\"Filters active (live) pages.\n\n :param user: Current active user. If omitted or the user doesn't\n have administration privileges, only active pages will be\n returned.\n \"\"\"\n if user is not None and user.is_superuser:\n return self.get_queryset()\n\n return self.get_queryset().filter(active=True)\n\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n return self.live().exclude(\n agreement__user=user,\n modified_on__lt=F('agreement__agreed_on'),\n ).distinct()\n", "path": "pootle/apps/staticpages/managers.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom pootle import __version__\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated() and not nocheck and\n LegalPage.objects.pending_user_agreement(request.user).exists()):\n return True\n\n return False\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n #FIXME: maybe we should expose relevant settings only?\n return {\n 'settings': {\n 'TITLE': settings.TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_VERSION': __version__,\n 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import F, Manager\n\n\nclass PageManager(Manager):\n\n def live(self, user=None, **kwargs):\n \"\"\"Filters active (live) pages.\n\n :param user: Current active user. If omitted or the user doesn't\n have administration privileges, only active pages will be\n returned.\n \"\"\"\n if user is not None and user.is_superuser:\n return self.get_queryset()\n\n return self.get_queryset().filter(active=True)\n\n def has_pending_agreement(self, user):\n agreements = self.pending_user_agreement(user)\n return len(list(agreements)) > 0\n\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n return self.raw('''\n SELECT DISTINCT staticpages_legalpage.id\n FROM staticpages_legalpage\n WHERE (staticpages_legalpage.active = TRUE\n AND NOT (staticpages_legalpage.id IN\n (SELECT A.document_id\n FROM staticpages_legalpage AS LP\n INNER JOIN staticpages_agreement AS A\n ON LP.id = A.document_id\n WHERE A.user_id = %s AND\n A.agreed_on > LP.modified_on)))\n ''', [user.id])\n", "path": "pootle/apps/staticpages/managers.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom pootle import __version__\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated() and not nocheck and\n LegalPage.objects.has_pending_agreement(request.user)):\n return True\n\n return False\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n #FIXME: maybe we should expose relevant settings only?\n return {\n 'settings': {\n 'TITLE': settings.TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_VERSION': __version__,\n 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}]}
| 1,150 | 439 |
gh_patches_debug_1241
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1108
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Infinite loop in RTLO detector
### What happened?
Slither hangs on this code indefinitely
### Can you share code with us to reproduce this bug?
https://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol
### Version
0.8.2
### Relevant log output
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/source/rtlo.py`
Content:
```
1 import re
2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
3
4
5 class RightToLeftOverride(AbstractDetector):
6 """
7 Detect the usage of a Right-To-Left-Override (U+202E) character
8 """
9
10 ARGUMENT = "rtlo"
11 HELP = "Right-To-Left-Override control character is used"
12 IMPACT = DetectorClassification.HIGH
13 CONFIDENCE = DetectorClassification.HIGH
14
15 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character"
16 WIKI_TITLE = "Right-to-Left-Override character"
17 WIKI_DESCRIPTION = "An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`."
18
19 # region wiki_exploit_scenario
20 WIKI_EXPLOIT_SCENARIO = """
21 ```solidity
22 contract Token
23 {
24
25 address payable o; // owner
26 mapping(address => uint) tokens;
27
28 function withdraw() external returns(uint)
29 {
30 uint amount = tokens[msg.sender];
31 address payable d = msg.sender;
32 tokens[msg.sender] = 0;
33 _withdraw(/*owner/*noitanitsed*/ d, o/*
34 /*value */, amount);
35 }
36
37 function _withdraw(address payable fee_receiver, address payable destination, uint value) internal
38 {
39 fee_receiver.transfer(1);
40 destination.transfer(value);
41 }
42 }
43 ```
44
45 `Token` uses the right-to-left-override character when calling `_withdraw`. As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.
46
47 """
48 # endregion wiki_exploit_scenario
49
50 WIKI_RECOMMENDATION = "Special control characters must not be allowed."
51
52 RTLO_CHARACTER_ENCODED = "\u202e".encode("utf-8")
53 STANDARD_JSON = False
54
55 def _detect(self):
56 results = []
57 pattern = re.compile(".*\u202e.*".encode("utf-8"))
58
59 for filename, source in self.slither.source_code.items():
60 # Attempt to find all RTLO characters in this source file.
61 original_source_encoded = source.encode("utf-8")
62 start_index = 0
63
64 # Keep searching all file contents for the character.
65 while True:
66 source_encoded = original_source_encoded[start_index:]
67 result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)
68
69 # If we couldn't find the character in the remainder of source, stop.
70 if result_index == -1:
71 break
72
73 # We found another instance of the character, define our output
74 idx = start_index + result_index
75
76 relative = self.slither.crytic_compile.filename_lookup(filename).relative
77 info = f"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\n"
78
79 # We have a patch, so pattern.find will return at least one result
80
81 info += f"\t- {pattern.findall(source_encoded)[0]}\n"
82 res = self.generate_result(info)
83 res.add_other(
84 "rtlo-character",
85 (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),
86 self.compilation_unit,
87 )
88 results.append(res)
89
90 # Advance the start index for the next iteration
91 start_index = result_index + 1
92
93 return results
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py
--- a/slither/detectors/source/rtlo.py
+++ b/slither/detectors/source/rtlo.py
@@ -88,6 +88,6 @@
results.append(res)
# Advance the start index for the next iteration
- start_index = result_index + 1
+ start_index = idx + 1
return results
|
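An illustrative sketch, not drawn from the record itself, of why the one-line change matters: `find()` returns an index relative to the slice it searched, so feeding that relative index back as the next start position can move the scan backwards, and with two or more RTLO characters the old code never terminates. The corrected scan in isolation:

```python
MARKER = "\u202e".encode("utf-8")  # right-to-left override, 3 bytes in UTF-8

def find_all(data, marker=MARKER):
    positions, start = [], 0
    while True:
        rel = data[start:].find(marker)        # index relative to the slice
        if rel == -1:
            return positions
        absolute = start + rel
        positions.append(absolute)
        start = absolute + 1                   # the fix: advance past the absolute offset
        # buggy version: start = rel + 1 -> can jump backwards and loop forever

data = b"abc" + MARKER + b"defgh" + MARKER + b"ij"
assert find_all(data) == [3, 11]
```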
{"golden_diff": "diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py\n--- a/slither/detectors/source/rtlo.py\n+++ b/slither/detectors/source/rtlo.py\n@@ -88,6 +88,6 @@\n results.append(res)\n \n # Advance the start index for the next iteration\n- start_index = result_index + 1\n+ start_index = idx + 1\n \n return results\n", "issue": "[Bug]: Infinite loop in RTLO detector\n### What happened?\n\nSlither hangs on this code indefinitely\n\n### Can you share code with us to reproduce this bug?\n\nhttps://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol\n\n### Version\n\n0.8.2\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner\u202e/*noitanitsed*/ d, o/*\u202d\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. 
As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = result_index + 1\n\n return results\n", "path": "slither/detectors/source/rtlo.py"}], "after_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner\u202e/*noitanitsed*/ d, o/*\u202d\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. 
As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = idx + 1\n\n return results\n", "path": "slither/detectors/source/rtlo.py"}]}
| 1,270 | 106 |
gh_patches_debug_28367
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-8909
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-3279] Remove usage of dbt.config in dbt/adapters
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/adapters/base/plugin.py`
Content:
```
1 from typing import List, Optional, Type
2
3 from dbt.adapters.base import Credentials
4 from dbt.exceptions import CompilationError
5 from dbt.adapters.protocol import AdapterProtocol
6
7
8 def project_name_from_path(include_path: str) -> str:
9 # avoid an import cycle
10 from dbt.config.project import PartialProject
11
12 partial = PartialProject.from_project_root(include_path)
13 if partial.project_name is None:
14 raise CompilationError(f"Invalid project at {include_path}: name not set!")
15 return partial.project_name
16
17
18 class AdapterPlugin:
19 """Defines the basic requirements for a dbt adapter plugin.
20
21 :param include_path: The path to this adapter plugin's root
22 :param dependencies: A list of adapter names that this adapter depends
23 upon.
24 """
25
26 def __init__(
27 self,
28 adapter: Type[AdapterProtocol],
29 credentials: Type[Credentials],
30 include_path: str,
31 dependencies: Optional[List[str]] = None,
32 ) -> None:
33
34 self.adapter: Type[AdapterProtocol] = adapter
35 self.credentials: Type[Credentials] = credentials
36 self.include_path: str = include_path
37 self.project_name: str = project_name_from_path(include_path)
38 self.dependencies: List[str]
39 if dependencies is None:
40 self.dependencies = []
41 else:
42 self.dependencies = dependencies
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py
--- a/core/dbt/adapters/base/plugin.py
+++ b/core/dbt/adapters/base/plugin.py
@@ -1,20 +1,10 @@
from typing import List, Optional, Type
+from pathlib import Path
from dbt.adapters.base import Credentials
-from dbt.exceptions import CompilationError
from dbt.adapters.protocol import AdapterProtocol
-def project_name_from_path(include_path: str) -> str:
- # avoid an import cycle
- from dbt.config.project import PartialProject
-
- partial = PartialProject.from_project_root(include_path)
- if partial.project_name is None:
- raise CompilationError(f"Invalid project at {include_path}: name not set!")
- return partial.project_name
-
-
class AdapterPlugin:
"""Defines the basic requirements for a dbt adapter plugin.
@@ -29,12 +19,13 @@
credentials: Type[Credentials],
include_path: str,
dependencies: Optional[List[str]] = None,
+ project_name: Optional[str] = None,
) -> None:
self.adapter: Type[AdapterProtocol] = adapter
self.credentials: Type[Credentials] = credentials
self.include_path: str = include_path
- self.project_name: str = project_name_from_path(include_path)
+ self.project_name: str = project_name or f"dbt_{Path(include_path).name}"
self.dependencies: List[str]
if dependencies is None:
self.dependencies = []
|
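An illustrative sketch, not drawn from the record itself: after the patch the plugin no longer parses `dbt_project.yml`; it either receives an explicit `project_name` or derives one from the last component of `include_path`. The naming rule in isolation, with made-up paths:

```python
from pathlib import Path

def resolve_project_name(include_path, project_name=None):
    # Explicit name wins; otherwise fall back to "dbt_<directory name>".
    return project_name or f"dbt_{Path(include_path).name}"

assert resolve_project_name("/adapters/dbt/include/postgres") == "dbt_postgres"
assert resolve_project_name("/adapters/dbt/include/postgres",
                            project_name="my_project") == "my_project"
```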
{"golden_diff": "diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py\n--- a/core/dbt/adapters/base/plugin.py\n+++ b/core/dbt/adapters/base/plugin.py\n@@ -1,20 +1,10 @@\n from typing import List, Optional, Type\n+from pathlib import Path\n \n from dbt.adapters.base import Credentials\n-from dbt.exceptions import CompilationError\n from dbt.adapters.protocol import AdapterProtocol\n \n \n-def project_name_from_path(include_path: str) -> str:\n- # avoid an import cycle\n- from dbt.config.project import PartialProject\n-\n- partial = PartialProject.from_project_root(include_path)\n- if partial.project_name is None:\n- raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n- return partial.project_name\n-\n-\n class AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n \n@@ -29,12 +19,13 @@\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n+ project_name: Optional[str] = None,\n ) -> None:\n \n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n- self.project_name: str = project_name_from_path(include_path)\n+ self.project_name: str = project_name or f\"dbt_{Path(include_path).name}\"\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n", "issue": "[CT-3279] Remove usage of dbt.config in dbt/adapters\n\n", "before_files": [{"content": "from typing import List, Optional, Type\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.exceptions import CompilationError\nfrom dbt.adapters.protocol import AdapterProtocol\n\n\ndef project_name_from_path(include_path: str) -> str:\n # avoid an import cycle\n from dbt.config.project import PartialProject\n\n partial = PartialProject.from_project_root(include_path)\n if partial.project_name is None:\n raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n return partial.project_name\n\n\nclass AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n\n :param include_path: The path to this adapter plugin's root\n :param dependencies: A list of adapter names that this adapter depends\n upon.\n \"\"\"\n\n def __init__(\n self,\n adapter: Type[AdapterProtocol],\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n ) -> None:\n\n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n self.project_name: str = project_name_from_path(include_path)\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n else:\n self.dependencies = dependencies\n", "path": "core/dbt/adapters/base/plugin.py"}], "after_files": [{"content": "from typing import List, Optional, Type\nfrom pathlib import Path\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.adapters.protocol import AdapterProtocol\n\n\nclass AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n\n :param include_path: The path to this adapter plugin's root\n :param dependencies: A list of adapter names that this adapter depends\n upon.\n \"\"\"\n\n def __init__(\n self,\n adapter: Type[AdapterProtocol],\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n project_name: Optional[str] = None,\n ) -> None:\n\n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] 
= credentials\n self.include_path: str = include_path\n self.project_name: str = project_name or f\"dbt_{Path(include_path).name}\"\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n else:\n self.dependencies = dependencies\n", "path": "core/dbt/adapters/base/plugin.py"}]}
| 644 | 338 |
gh_patches_debug_10793
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-724
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate dependency on scipy for pyro-ppl package
Once distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import sys
4
5 from setuptools import find_packages, setup
6
7 # Find pyro version.
8 for line in open('pyro/__init__.py'):
9 if line.startswith('__version__ = '):
10 version = line.strip().split()[2][1:-1]
11
12 # Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
13 # When releasing on pypi, make sure pandoc is on your system:
14 # $ brew install pandoc # OS X
15 # $ sudo apt-get install pandoc # Ubuntu Linux
16 try:
17 import pypandoc
18 long_description = pypandoc.convert('README.md', 'rst')
19 except (IOError, ImportError, OSError) as e:
20 sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
21 sys.stderr.flush()
22 long_description = open('README.md').read()
23
24 # Remove badges since they will always be obsolete.
25 blacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']
26 long_description = '\n'.join(
27 [line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)])
28
29 setup(
30 name='pyro-ppl',
31 version=version,
32 description='A Python library for probabilistic modeling and inference',
33 long_description=long_description,
34 packages=find_packages(exclude=('tests*',)),
35 url='http://pyro.ai',
36 author='Uber AI Labs',
37 author_email='[email protected]',
38 install_requires=[
39 'numpy>=1.7',
40 'scipy>=0.19.0',
41 'cloudpickle>=0.3.1',
42 'graphviz>=0.8',
43 'networkx>=2.0.0',
44 'observations>=0.1.4',
45 'torch',
46 'six>=1.10.0',
47 ],
48 extras_require={
49 'notebooks': ['jupyter>=1.0.0'],
50 'visualization': [
51 'matplotlib>=1.3',
52 'visdom>=0.1.4',
53 'pillow',
54 ],
55 'test': [
56 'pytest',
57 'pytest-cov',
58 'nbval',
59 # examples/tutorials
60 'matplotlib',
61 'visdom',
62 'torchvision',
63 ],
64 'profile': ['prettytable'],
65 'dev': [
66 'torchvision',
67 'flake8',
68 'yapf',
69 'isort',
70 'pytest',
71 'pytest-xdist',
72 'nbval',
73 'nbstripout',
74 'pypandoc',
75 'sphinx',
76 'sphinx_rtd_theme',
77 ],
78 },
79 tests_require=['flake8', 'pytest'],
80 keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
81 license='MIT License',
82 classifiers=[
83 'Intended Audience :: Developers',
84 'Intended Audience :: Education',
85 'Intended Audience :: Science/Research',
86 'Operating System :: POSIX :: Linux',
87 'Operating System :: MacOS :: MacOS X',
88 'Programming Language :: Python :: 2.7',
89 'Programming Language :: Python :: 3.4',
90 ],
91 # yapf
92 )
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,6 @@
author_email='[email protected]',
install_requires=[
'numpy>=1.7',
- 'scipy>=0.19.0',
'cloudpickle>=0.3.1',
'graphviz>=0.8',
'networkx>=2.0.0',
@@ -56,6 +55,7 @@
'pytest',
'pytest-cov',
'nbval',
+ 'scipy>=0.19.0',
# examples/tutorials
'matplotlib',
'visdom',
|
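An illustrative sketch, not drawn from the record itself: the effect of the patch is that scipy becomes opt-in, so a plain `pip install pyro-ppl` no longer pulls it in while `pip install "pyro-ppl[test]"` still does. A minimal `setup.py` showing that split, with a placeholder package name:

```python
from setuptools import setup

setup(
    name="example-pkg",                                    # placeholder, not pyro-ppl itself
    version="0.1",
    install_requires=["numpy>=1.7"],                       # installed for every user
    extras_require={"test": ["scipy>=0.19.0", "pytest"]},  # installed only with the [test] extra
)
```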
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,6 @@\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n- 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n@@ -56,6 +55,7 @@\n 'pytest',\n 'pytest-cov',\n 'nbval',\n+ 'scipy>=0.19.0',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n", "issue": "Deprecate dependency on scipy for pyro-ppl package\nOnce distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Find pyro version.\nfor line in open('pyro/__init__.py'):\n if line.startswith('__version__ = '):\n version = line.strip().split()[2][1:-1]\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError, OSError) as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\nblacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']\nlong_description = '\\n'.join(\n [line for line in long_description.split('\\n') if not any(patt in line for patt in blacklist)])\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(exclude=('tests*',)),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n 'observations>=0.1.4',\n 'torch',\n 'six>=1.10.0',\n ],\n extras_require={\n 'notebooks': ['jupyter>=1.0.0'],\n 'visualization': [\n 'matplotlib>=1.3',\n 'visdom>=0.1.4',\n 'pillow',\n ],\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'nbval',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n 'torchvision',\n ],\n 'profile': ['prettytable'],\n 'dev': [\n 'torchvision',\n 'flake8',\n 'yapf',\n 'isort',\n 'pytest',\n 'pytest-xdist',\n 'nbval',\n 'nbstripout',\n 'pypandoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n },\n tests_require=['flake8', 'pytest'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n # yapf\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Find pyro version.\nfor line in open('pyro/__init__.py'):\n if line.startswith('__version__ = '):\n version = line.strip().split()[2][1:-1]\n\n# Convert 
README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError, OSError) as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\nblacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']\nlong_description = '\\n'.join(\n [line for line in long_description.split('\\n') if not any(patt in line for patt in blacklist)])\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(exclude=('tests*',)),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n 'observations>=0.1.4',\n 'torch',\n 'six>=1.10.0',\n ],\n extras_require={\n 'notebooks': ['jupyter>=1.0.0'],\n 'visualization': [\n 'matplotlib>=1.3',\n 'visdom>=0.1.4',\n 'pillow',\n ],\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'nbval',\n 'scipy>=0.19.0',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n 'torchvision',\n ],\n 'profile': ['prettytable'],\n 'dev': [\n 'torchvision',\n 'flake8',\n 'yapf',\n 'isort',\n 'pytest',\n 'pytest-xdist',\n 'nbval',\n 'nbstripout',\n 'pypandoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n },\n tests_require=['flake8', 'pytest'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n # yapf\n)\n", "path": "setup.py"}]}
| 1,216 | 153 |
gh_patches_debug_17814
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-7328
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parameters needs examples in the documentation
### What is the expected enhancement?
Many users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when search on how to parametrize circuits. @Cryoris did a good jobs at explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit . I think the Parameter docstring needs a short example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/parameter.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12 """
13 Parameter Class for variable parameters.
14 """
15
16 from uuid import uuid4
17
18 from .parameterexpression import ParameterExpression
19
20 try:
21 import symengine
22
23 HAS_SYMENGINE = True
24 except ImportError:
25 HAS_SYMENGINE = False
26
27
28 class Parameter(ParameterExpression):
29 """Parameter Class for variable parameters."""
30
31 def __new__(cls, name, uuid=None): # pylint: disable=unused-argument
32 # Parameter relies on self._uuid being set prior to other attributes
33 # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash
34 # or __eq__ functions.
35 obj = object.__new__(cls)
36
37 if uuid is None:
38 obj._uuid = uuid4()
39 else:
40 obj._uuid = uuid
41
42 obj._hash = hash(obj._uuid)
43 return obj
44
45 def __getnewargs__(self):
46 # Unpickling won't in general call __init__ but will always call
47 # __new__. Specify arguments to be passed to __new__ when unpickling.
48
49 return (self.name, self._uuid)
50
51 def __init__(self, name: str):
52 """Create a new named :class:`Parameter`.
53
54 Args:
55 name: name of the ``Parameter``, used for visual representation. This can
56 be any unicode string, e.g. "ϕ".
57 """
58 self._name = name
59 if not HAS_SYMENGINE:
60 from sympy import Symbol
61
62 symbol = Symbol(name)
63 else:
64 symbol = symengine.Symbol(name)
65 super().__init__(symbol_map={self: symbol}, expr=symbol)
66
67 def subs(self, parameter_map: dict):
68 """Substitute self with the corresponding parameter in ``parameter_map``."""
69 return parameter_map[self]
70
71 @property
72 def name(self):
73 """Returns the name of the :class:`Parameter`."""
74 return self._name
75
76 def __str__(self):
77 return self.name
78
79 def __copy__(self):
80 return self
81
82 def __deepcopy__(self, memo=None):
83 return self
84
85 def __repr__(self):
86 return f"{self.__class__.__name__}({self.name})"
87
88 def __eq__(self, other):
89 if isinstance(other, Parameter):
90 return self._uuid == other._uuid
91 elif isinstance(other, ParameterExpression):
92 return super().__eq__(other)
93 else:
94 return False
95
96 def __hash__(self):
97 return self._hash
98
99 def __getstate__(self):
100 return {"name": self._name}
101
102 def __setstate__(self, state):
103 self._name = state["name"]
104 if not HAS_SYMENGINE:
105 from sympy import Symbol
106
107 symbol = Symbol(self._name)
108 else:
109 symbol = symengine.Symbol(self._name)
110 super().__init__(symbol_map={self: symbol}, expr=symbol)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py
--- a/qiskit/circuit/parameter.py
+++ b/qiskit/circuit/parameter.py
@@ -26,7 +26,32 @@
class Parameter(ParameterExpression):
- """Parameter Class for variable parameters."""
+ """Parameter Class for variable parameters.
+
+ A parameter is a variable value that is not required to be fixed
+ at circuit definition.
+
+ Examples:
+
+ Construct a variable-rotation X gate using circuit parameters.
+
+ .. jupyter-execute::
+
+ from qiskit.circuit import QuantumCircuit, Parameter
+
+ # create the parameter
+ phi = Parameter('phi')
+ qc = QuantumCircuit(1)
+
+ # parameterize the rotation
+ qc.rx(phi, 0)
+ qc.draw()
+
+ # bind the parameters after circuit to create a bound circuit
+ bc = qc.bind_parameters({phi: 3.14})
+ bc.measure_all()
+ bc.draw()
+ """
def __new__(cls, name, uuid=None): # pylint: disable=unused-argument
# Parameter relies on self._uuid being set prior to other attributes
|
{"golden_diff": "diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py\n--- a/qiskit/circuit/parameter.py\n+++ b/qiskit/circuit/parameter.py\n@@ -26,7 +26,32 @@\n \n \n class Parameter(ParameterExpression):\n- \"\"\"Parameter Class for variable parameters.\"\"\"\n+ \"\"\"Parameter Class for variable parameters.\n+\n+ A parameter is a variable value that is not required to be fixed\n+ at circuit definition.\n+\n+ Examples:\n+\n+ Construct a variable-rotation X gate using circuit parameters.\n+\n+ .. jupyter-execute::\n+\n+ from qiskit.circuit import QuantumCircuit, Parameter\n+\n+ # create the parameter\n+ phi = Parameter('phi')\n+ qc = QuantumCircuit(1)\n+\n+ # parameterize the rotation\n+ qc.rx(phi, 0)\n+ qc.draw()\n+\n+ # bind the parameters after circuit to create a bound circuit\n+ bc = qc.bind_parameters({phi: 3.14})\n+ bc.measure_all()\n+ bc.draw()\n+ \"\"\"\n \n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n", "issue": "Parameters needs examples in the documentation\n### What is the expected enhancement?\n\nMany users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when search on how to parametrize circuits. @Cryoris did a good jobs at explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit . I think the Parameter docstring needs a short example.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nParameter Class for variable parameters.\n\"\"\"\n\nfrom uuid import uuid4\n\nfrom .parameterexpression import ParameterExpression\n\ntry:\n import symengine\n\n HAS_SYMENGINE = True\nexcept ImportError:\n HAS_SYMENGINE = False\n\n\nclass Parameter(ParameterExpression):\n \"\"\"Parameter Class for variable parameters.\"\"\"\n\n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash\n # or __eq__ functions.\n obj = object.__new__(cls)\n\n if uuid is None:\n obj._uuid = uuid4()\n else:\n obj._uuid = uuid\n\n obj._hash = hash(obj._uuid)\n return obj\n\n def __getnewargs__(self):\n # Unpickling won't in general call __init__ but will always call\n # __new__. Specify arguments to be passed to __new__ when unpickling.\n\n return (self.name, self._uuid)\n\n def __init__(self, name: str):\n \"\"\"Create a new named :class:`Parameter`.\n\n Args:\n name: name of the ``Parameter``, used for visual representation. This can\n be any unicode string, e.g. 
\"\u03d5\".\n \"\"\"\n self._name = name\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(name)\n else:\n symbol = symengine.Symbol(name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n\n def subs(self, parameter_map: dict):\n \"\"\"Substitute self with the corresponding parameter in ``parameter_map``.\"\"\"\n return parameter_map[self]\n\n @property\n def name(self):\n \"\"\"Returns the name of the :class:`Parameter`.\"\"\"\n return self._name\n\n def __str__(self):\n return self.name\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo=None):\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.name})\"\n\n def __eq__(self, other):\n if isinstance(other, Parameter):\n return self._uuid == other._uuid\n elif isinstance(other, ParameterExpression):\n return super().__eq__(other)\n else:\n return False\n\n def __hash__(self):\n return self._hash\n\n def __getstate__(self):\n return {\"name\": self._name}\n\n def __setstate__(self, state):\n self._name = state[\"name\"]\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(self._name)\n else:\n symbol = symengine.Symbol(self._name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n", "path": "qiskit/circuit/parameter.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nParameter Class for variable parameters.\n\"\"\"\n\nfrom uuid import uuid4\n\nfrom .parameterexpression import ParameterExpression\n\ntry:\n import symengine\n\n HAS_SYMENGINE = True\nexcept ImportError:\n HAS_SYMENGINE = False\n\n\nclass Parameter(ParameterExpression):\n \"\"\"Parameter Class for variable parameters.\n\n A parameter is a variable value that is not required to be fixed\n at circuit definition.\n\n Examples:\n\n Construct a variable-rotation X gate using circuit parameters.\n\n .. jupyter-execute::\n\n from qiskit.circuit import QuantumCircuit, Parameter\n\n # create the parameter\n phi = Parameter('phi')\n qc = QuantumCircuit(1)\n\n # parameterize the rotation\n qc.rx(phi, 0)\n qc.draw()\n\n # bind the parameters after circuit to create a bound circuit\n bc = qc.bind_parameters({phi: 3.14})\n bc.measure_all()\n bc.draw()\n \"\"\"\n\n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash\n # or __eq__ functions.\n obj = object.__new__(cls)\n\n if uuid is None:\n obj._uuid = uuid4()\n else:\n obj._uuid = uuid\n\n obj._hash = hash(obj._uuid)\n return obj\n\n def __getnewargs__(self):\n # Unpickling won't in general call __init__ but will always call\n # __new__. Specify arguments to be passed to __new__ when unpickling.\n\n return (self.name, self._uuid)\n\n def __init__(self, name: str):\n \"\"\"Create a new named :class:`Parameter`.\n\n Args:\n name: name of the ``Parameter``, used for visual representation. This can\n be any unicode string, e.g. 
\"\u03d5\".\n \"\"\"\n self._name = name\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(name)\n else:\n symbol = symengine.Symbol(name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n\n def subs(self, parameter_map: dict):\n \"\"\"Substitute self with the corresponding parameter in ``parameter_map``.\"\"\"\n return parameter_map[self]\n\n @property\n def name(self):\n \"\"\"Returns the name of the :class:`Parameter`.\"\"\"\n return self._name\n\n def __str__(self):\n return self.name\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo=None):\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.name})\"\n\n def __eq__(self, other):\n if isinstance(other, Parameter):\n return self._uuid == other._uuid\n elif isinstance(other, ParameterExpression):\n return super().__eq__(other)\n else:\n return False\n\n def __hash__(self):\n return self._hash\n\n def __getstate__(self):\n return {\"name\": self._name}\n\n def __setstate__(self, state):\n self._name = state[\"name\"]\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(self._name)\n else:\n symbol = symengine.Symbol(self._name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n", "path": "qiskit/circuit/parameter.py"}]}
| 1,338 | 280 |
gh_patches_debug_21047
|
rasdani/github-patches
|
git_diff
|
dask__distributed-3387
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When --dashboard-prefix is used root location is always redirected to /status without adding the prefix
The problem arises when using --dashboard-prefix, using http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/dashboard/core.py`
Content:
```
1 from distutils.version import LooseVersion
2 import os
3 import warnings
4
5 import bokeh
6 from bokeh.server.server import Server
7 from tornado import web
8
9
10 if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"):
11 warnings.warn(
12 "\nDask needs bokeh >= 0.13.0 for the dashboard."
13 "\nContinuing without the dashboard."
14 )
15 raise ImportError("Dask needs bokeh >= 0.13.0")
16
17
18 class BokehServer(object):
19 server_kwargs = {}
20
21 def listen(self, addr):
22 if self.server:
23 return
24 if isinstance(addr, tuple):
25 ip, port = addr
26 else:
27 port = addr
28 ip = None
29 for i in range(5):
30 try:
31 server_kwargs = dict(
32 port=port,
33 address=ip,
34 check_unused_sessions_milliseconds=500,
35 allow_websocket_origin=["*"],
36 use_index=False,
37 extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})],
38 )
39 server_kwargs.update(self.server_kwargs)
40 self.server = Server(self.apps, **server_kwargs)
41 self.server.start()
42
43 handlers = [
44 (
45 self.prefix + r"/statics/(.*)",
46 web.StaticFileHandler,
47 {"path": os.path.join(os.path.dirname(__file__), "static")},
48 )
49 ]
50
51 self.server._tornado.add_handlers(r".*", handlers)
52
53 return
54 except (SystemExit, EnvironmentError) as exc:
55 if port != 0:
56 if "already in use" in str(
57 exc
58 ) or "Only one usage of" in str( # Unix/Mac
59 exc
60 ): # Windows
61 msg = (
62 "Port %d is already in use. "
63 "\nPerhaps you already have a cluster running?"
64 "\nHosting the diagnostics dashboard on a random port instead."
65 % port
66 )
67 else:
68 msg = (
69 "Failed to start diagnostics server on port %d. " % port
70 + str(exc)
71 )
72 warnings.warn("\n" + msg)
73 port = 0
74 if i == 4:
75 raise
76
77 @property
78 def port(self):
79 return (
80 self.server.port
81 or list(self.server._http._sockets.values())[0].getsockname()[1]
82 )
83
84 def stop(self):
85 for context in self.server._tornado._applications.values():
86 context.run_unload_hook()
87
88 self.server._tornado._stats_job.stop()
89 self.server._tornado._cleanup_job.stop()
90 if self.server._tornado._ping_job is not None:
91 self.server._tornado._ping_job.stop()
92
93 # https://github.com/bokeh/bokeh/issues/5494
94 if LooseVersion(bokeh.__version__) >= "0.12.4":
95 self.server.stop()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py
--- a/distributed/dashboard/core.py
+++ b/distributed/dashboard/core.py
@@ -5,6 +5,7 @@
import bokeh
from bokeh.server.server import Server
from tornado import web
+from urllib.parse import urljoin
if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"):
@@ -34,7 +35,13 @@
check_unused_sessions_milliseconds=500,
allow_websocket_origin=["*"],
use_index=False,
- extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})],
+ extra_patterns=[
+ (
+ r"/",
+ web.RedirectHandler,
+ {"url": urljoin(self.prefix.rstrip("/") + "/", r"status")},
+ )
+ ],
)
server_kwargs.update(self.server_kwargs)
self.server = Server(self.apps, **server_kwargs)
|
{"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n import bokeh\n from bokeh.server.server import Server\n from tornado import web\n+from urllib.parse import urljoin\n \n \n if LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n@@ -34,7 +35,13 @@\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n- extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n+ extra_patterns=[\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin(self.prefix.rstrip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n", "issue": "When --dashboard-prefix is used root location is always redirected to /status without adding the prefix\nThe problem arises when using --dashboard-prefix, using http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status.\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport bokeh\nfrom bokeh.server.server import Server\nfrom tornado import web\n\n\nif LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n warnings.warn(\n \"\\nDask needs bokeh >= 0.13.0 for the dashboard.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\"Dask needs bokeh >= 0.13.0\")\n\n\nclass BokehServer(object):\n server_kwargs = {}\n\n def listen(self, addr):\n if self.server:\n return\n if isinstance(addr, tuple):\n ip, port = addr\n else:\n port = addr\n ip = None\n for i in range(5):\n try:\n server_kwargs = dict(\n port=port,\n address=ip,\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n self.server.start()\n\n handlers = [\n (\n self.prefix + r\"/statics/(.*)\",\n web.StaticFileHandler,\n {\"path\": os.path.join(os.path.dirname(__file__), \"static\")},\n )\n ]\n\n self.server._tornado.add_handlers(r\".*\", handlers)\n\n return\n except (SystemExit, EnvironmentError) as exc:\n if port != 0:\n if \"already in use\" in str(\n exc\n ) or \"Only one usage of\" in str( # Unix/Mac\n exc\n ): # Windows\n msg = (\n \"Port %d is already in use. \"\n \"\\nPerhaps you already have a cluster running?\"\n \"\\nHosting the diagnostics dashboard on a random port instead.\"\n % port\n )\n else:\n msg = (\n \"Failed to start diagnostics server on port %d. 
\" % port\n + str(exc)\n )\n warnings.warn(\"\\n\" + msg)\n port = 0\n if i == 4:\n raise\n\n @property\n def port(self):\n return (\n self.server.port\n or list(self.server._http._sockets.values())[0].getsockname()[1]\n )\n\n def stop(self):\n for context in self.server._tornado._applications.values():\n context.run_unload_hook()\n\n self.server._tornado._stats_job.stop()\n self.server._tornado._cleanup_job.stop()\n if self.server._tornado._ping_job is not None:\n self.server._tornado._ping_job.stop()\n\n # https://github.com/bokeh/bokeh/issues/5494\n if LooseVersion(bokeh.__version__) >= \"0.12.4\":\n self.server.stop()\n", "path": "distributed/dashboard/core.py"}], "after_files": [{"content": "from distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport bokeh\nfrom bokeh.server.server import Server\nfrom tornado import web\nfrom urllib.parse import urljoin\n\n\nif LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n warnings.warn(\n \"\\nDask needs bokeh >= 0.13.0 for the dashboard.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\"Dask needs bokeh >= 0.13.0\")\n\n\nclass BokehServer(object):\n server_kwargs = {}\n\n def listen(self, addr):\n if self.server:\n return\n if isinstance(addr, tuple):\n ip, port = addr\n else:\n port = addr\n ip = None\n for i in range(5):\n try:\n server_kwargs = dict(\n port=port,\n address=ip,\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n extra_patterns=[\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin(self.prefix.rstrip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n self.server.start()\n\n handlers = [\n (\n self.prefix + r\"/statics/(.*)\",\n web.StaticFileHandler,\n {\"path\": os.path.join(os.path.dirname(__file__), \"static\")},\n )\n ]\n\n self.server._tornado.add_handlers(r\".*\", handlers)\n\n return\n except (SystemExit, EnvironmentError) as exc:\n if port != 0:\n if \"already in use\" in str(\n exc\n ) or \"Only one usage of\" in str( # Unix/Mac\n exc\n ): # Windows\n msg = (\n \"Port %d is already in use. \"\n \"\\nPerhaps you already have a cluster running?\"\n \"\\nHosting the diagnostics dashboard on a random port instead.\"\n % port\n )\n else:\n msg = (\n \"Failed to start diagnostics server on port %d. \" % port\n + str(exc)\n )\n warnings.warn(\"\\n\" + msg)\n port = 0\n if i == 4:\n raise\n\n @property\n def port(self):\n return (\n self.server.port\n or list(self.server._http._sockets.values())[0].getsockname()[1]\n )\n\n def stop(self):\n for context in self.server._tornado._applications.values():\n context.run_unload_hook()\n\n self.server._tornado._stats_job.stop()\n self.server._tornado._cleanup_job.stop()\n if self.server._tornado._ping_job is not None:\n self.server._tornado._ping_job.stop()\n\n # https://github.com/bokeh/bokeh/issues/5494\n if LooseVersion(bokeh.__version__) >= \"0.12.4\":\n self.server.stop()\n", "path": "distributed/dashboard/core.py"}]}
| 1,139 | 209 |
gh_patches_debug_40460
|
rasdani/github-patches
|
git_diff
|
vacanza__python-holidays-371
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong workday info for country HR
Today (Oct. 8, 2020) my alarmclock automation did not go off, because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but is not anymore.
binary_sensor:
- platform: workday
country: HR
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/croatia.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Author: ryanss <[email protected]> (c) 2014-2017
10 # dr-prodigy <[email protected]> (c) 2017-2020
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 from datetime import date
15
16 from dateutil.easter import easter
17 from dateutil.relativedelta import relativedelta as rd
18
19 from holidays.constants import JAN, MAY, JUN, AUG, OCT, \
20 NOV, DEC
21 from holidays.holiday_base import HolidayBase
22
23
24 class Croatia(HolidayBase):
25
26 # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia
27
28 def __init__(self, **kwargs):
29 self.country = "HR"
30 HolidayBase.__init__(self, **kwargs)
31
32 def _populate(self, year):
33 # New years
34 self[date(year, JAN, 1)] = "Nova Godina"
35 # Epiphany
36 self[date(year, JAN, 6)] = "Sveta tri kralja"
37 easter_date = easter(year)
38
39 # Easter
40 self[easter_date] = "Uskrs"
41 # Easter Monday
42 self[easter_date + rd(days=1)] = "Uskršnji ponedjeljak"
43
44 # Corpus Christi
45 self[easter_date + rd(days=60)] = "Tijelovo"
46
47 # International Workers' Day
48 self[date(year, MAY, 1)] = "Međunarodni praznik rada"
49
50 if year >= 2020:
51 # Statehood day
52 self[date(year, MAY, 30)] = "Dan državnosti"
53
54 # Anti-fascist struggle day
55 self[date(year, JUN, 22)] = "Dan antifašističke borbe"
56
57 if year < 2020:
58 # Statehood day
59 self[date(year, JUN, 25)] = "Dan državnosti"
60
61 # Victory and Homeland Thanksgiving Day
62 self[date(year, AUG, 5)] = "Dan pobjede i domovinske zahvalnosti"
63
64 # Assumption of Mary
65 self[date(year, AUG, 15)] = "Velika Gospa"
66
67 if year < 2020:
68 # Independence Day
69 self[date(year, OCT, 8)] = "Dan neovisnosti"
70
71 # All Saints' Day
72 self[date(year, NOV, 1)] = "Dan svih svetih"
73
74 if year >= 2020:
75 # Memorial day
76 self[date(year, NOV, 18)] =\
77 "Dan sjećanja na žrtve Domovinskog rata"
78
79 # Christmas day
80 self[date(year, DEC, 25)] = "Božić"
81
82 # St. Stephen's day
83 self[date(year, DEC, 26)] = "Sveti Stjepan"
84
85
86 class HR(Croatia):
87 pass
88
89
90 class HRV(Croatia):
91 pass
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py
--- a/holidays/countries/croatia.py
+++ b/holidays/countries/croatia.py
@@ -11,18 +11,18 @@
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
-from datetime import date
+from datetime import date, timedelta
from dateutil.easter import easter
-from dateutil.relativedelta import relativedelta as rd
-from holidays.constants import JAN, MAY, JUN, AUG, OCT, \
- NOV, DEC
+from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class Croatia(HolidayBase):
+ # Updated with act 022-03 / 19-01 / 219 of 14 November 2019
+ # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html
# https://en.wikipedia.org/wiki/Public_holidays_in_Croatia
def __init__(self, **kwargs):
@@ -32,6 +32,7 @@
def _populate(self, year):
# New years
self[date(year, JAN, 1)] = "Nova Godina"
+
# Epiphany
self[date(year, JAN, 6)] = "Sveta tri kralja"
easter_date = easter(year)
@@ -39,23 +40,23 @@
# Easter
self[easter_date] = "Uskrs"
# Easter Monday
- self[easter_date + rd(days=1)] = "Uskršnji ponedjeljak"
+ self[easter_date + timedelta(days=1)] = "Uskrsni ponedjeljak"
# Corpus Christi
- self[easter_date + rd(days=60)] = "Tijelovo"
+ self[easter_date + timedelta(days=60)] = "Tijelovo"
# International Workers' Day
self[date(year, MAY, 1)] = "Međunarodni praznik rada"
+ # Statehood day (new)
if year >= 2020:
- # Statehood day
self[date(year, MAY, 30)] = "Dan državnosti"
# Anti-fascist struggle day
self[date(year, JUN, 22)] = "Dan antifašističke borbe"
+ # Statehood day (old)
if year < 2020:
- # Statehood day
self[date(year, JUN, 25)] = "Dan državnosti"
# Victory and Homeland Thanksgiving Day
@@ -64,17 +65,16 @@
# Assumption of Mary
self[date(year, AUG, 15)] = "Velika Gospa"
+ # Independence Day (old)
if year < 2020:
- # Independence Day
self[date(year, OCT, 8)] = "Dan neovisnosti"
# All Saints' Day
- self[date(year, NOV, 1)] = "Dan svih svetih"
+ self[date(year, NOV, 1)] = "Svi sveti"
if year >= 2020:
# Memorial day
- self[date(year, NOV, 18)] =\
- "Dan sjećanja na žrtve Domovinskog rata"
+ self[date(year, NOV, 18)] = "Dan sjećanja"
# Christmas day
self[date(year, DEC, 25)] = "Božić"
|
{"golden_diff": "diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py\n--- a/holidays/countries/croatia.py\n+++ b/holidays/countries/croatia.py\n@@ -11,18 +11,18 @@\n # Website: https://github.com/dr-prodigy/python-holidays\n # License: MIT (see LICENSE file)\n \n-from datetime import date\n+from datetime import date, timedelta\n \n from dateutil.easter import easter\n-from dateutil.relativedelta import relativedelta as rd\n \n-from holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n- NOV, DEC\n+from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC\n from holidays.holiday_base import HolidayBase\n \n \n class Croatia(HolidayBase):\n \n+ # Updated with act 022-03 / 19-01 / 219 of 14 November 2019\n+ # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n \n def __init__(self, **kwargs):\n@@ -32,6 +32,7 @@\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n+\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n@@ -39,23 +40,23 @@\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n- self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n+ self[easter_date + timedelta(days=1)] = \"Uskrsni ponedjeljak\"\n \n # Corpus Christi\n- self[easter_date + rd(days=60)] = \"Tijelovo\"\n+ self[easter_date + timedelta(days=60)] = \"Tijelovo\"\n \n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n \n+ # Statehood day (new)\n if year >= 2020:\n- # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n \n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n \n+ # Statehood day (old)\n if year < 2020:\n- # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n \n # Victory and Homeland Thanksgiving Day\n@@ -64,17 +65,16 @@\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n \n+ # Independence Day (old)\n if year < 2020:\n- # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n \n # All Saints' Day\n- self[date(year, NOV, 1)] = \"Dan svih svetih\"\n+ self[date(year, NOV, 1)] = \"Svi sveti\"\n \n if year >= 2020:\n # Memorial day\n- self[date(year, NOV, 18)] =\\\n- \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n+ self[date(year, NOV, 18)] = \"Dan sje\u0107anja\"\n \n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n", "issue": "Wrong workday info for country HR\nToday (Oct. 8, 2020) my alarmclock automation did not go off, because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but is not anymore.\r\n\r\nbinary_sensor:\r\n - platform: workday\r\n country: HR\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Croatia(HolidayBase):\n\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n\n def __init__(self, **kwargs):\n self.country = \"HR\"\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n\n # Corpus Christi\n self[easter_date + rd(days=60)] = \"Tijelovo\"\n\n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n\n if year >= 2020:\n # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n\n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n\n if year < 2020:\n # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n\n # Victory and Homeland Thanksgiving Day\n self[date(year, AUG, 5)] = \"Dan pobjede i domovinske zahvalnosti\"\n\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n\n if year < 2020:\n # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n\n # All Saints' Day\n self[date(year, NOV, 1)] = \"Dan svih svetih\"\n\n if year >= 2020:\n # Memorial day\n self[date(year, NOV, 18)] =\\\n \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n\n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n\n # St. Stephen's day\n self[date(year, DEC, 26)] = \"Sveti Stjepan\"\n\n\nclass HR(Croatia):\n pass\n\n\nclass HRV(Croatia):\n pass\n", "path": "holidays/countries/croatia.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date, timedelta\n\nfrom dateutil.easter import easter\n\nfrom holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Croatia(HolidayBase):\n\n # Updated with act 022-03 / 19-01 / 219 of 14 November 2019\n # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n\n def __init__(self, **kwargs):\n self.country = \"HR\"\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n self[easter_date + timedelta(days=1)] = \"Uskrsni ponedjeljak\"\n\n # Corpus Christi\n self[easter_date + timedelta(days=60)] = \"Tijelovo\"\n\n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n\n # Statehood day (new)\n if year >= 2020:\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n\n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n\n # Statehood day (old)\n if year < 2020:\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n\n # Victory and Homeland Thanksgiving Day\n self[date(year, AUG, 5)] = \"Dan pobjede i domovinske zahvalnosti\"\n\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n\n # Independence Day (old)\n if year < 2020:\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n\n # All Saints' Day\n self[date(year, NOV, 1)] = \"Svi sveti\"\n\n if year >= 2020:\n # Memorial day\n self[date(year, NOV, 18)] = \"Dan sje\u0107anja\"\n\n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n\n # St. Stephen's day\n self[date(year, DEC, 26)] = \"Sveti Stjepan\"\n\n\nclass HR(Croatia):\n pass\n\n\nclass HRV(Croatia):\n pass\n", "path": "holidays/countries/croatia.py"}]}
| 1,373 | 883 |
gh_patches_debug_39903
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5514
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 3.6 support
[It went end-of-life on December 2021](https://endoflife.date/python).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/py36.py`
Content:
```
1 import warnings
2
3 from scrapy.exceptions import ScrapyDeprecationWarning
4 from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401
5
6
7 warnings.warn(
8 "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.",
9 category=ScrapyDeprecationWarning,
10 stacklevel=2,
11 )
12
```
Path: `setup.py`
Content:
```
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 install_requires = [
22 'Twisted>=17.9.0',
23 'cryptography>=2.0',
24 'cssselect>=0.9.1',
25 'itemloaders>=1.0.1',
26 'parsel>=1.5.0',
27 'pyOpenSSL>=16.2.0',
28 'queuelib>=1.4.2',
29 'service_identity>=16.0.0',
30 'w3lib>=1.17.0',
31 'zope.interface>=4.1.3',
32 'protego>=0.1.15',
33 'itemadapter>=0.1.0',
34 'setuptools',
35 'tldextract',
36 ]
37 extras_require = {}
38 cpython_dependencies = [
39 'lxml>=3.5.0',
40 'PyDispatcher>=2.0.5',
41 ]
42 if has_environment_marker_platform_impl_support():
43 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
44 extras_require[':platform_python_implementation == "PyPy"'] = [
45 # Earlier lxml versions are affected by
46 # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
47 # which was fixed in Cython 0.26, released on 2017-06-19, and used to
48 # generate the C headers of lxml release tarballs published since then, the
49 # first of which was:
50 'lxml>=4.0.0',
51 'PyPyDispatcher>=2.1.0',
52 ]
53 else:
54 install_requires.extend(cpython_dependencies)
55
56
57 setup(
58 name='Scrapy',
59 version=version,
60 url='https://scrapy.org',
61 project_urls={
62 'Documentation': 'https://docs.scrapy.org/',
63 'Source': 'https://github.com/scrapy/scrapy',
64 'Tracker': 'https://github.com/scrapy/scrapy/issues',
65 },
66 description='A high-level Web Crawling and Web Scraping framework',
67 long_description=open('README.rst').read(),
68 author='Scrapy developers',
69 maintainer='Pablo Hoffman',
70 maintainer_email='[email protected]',
71 license='BSD',
72 packages=find_packages(exclude=('tests', 'tests.*')),
73 include_package_data=True,
74 zip_safe=False,
75 entry_points={
76 'console_scripts': ['scrapy = scrapy.cmdline:execute']
77 },
78 classifiers=[
79 'Framework :: Scrapy',
80 'Development Status :: 5 - Production/Stable',
81 'Environment :: Console',
82 'Intended Audience :: Developers',
83 'License :: OSI Approved :: BSD License',
84 'Operating System :: OS Independent',
85 'Programming Language :: Python',
86 'Programming Language :: Python :: 3',
87 'Programming Language :: Python :: 3.6',
88 'Programming Language :: Python :: 3.7',
89 'Programming Language :: Python :: 3.8',
90 'Programming Language :: Python :: 3.9',
91 'Programming Language :: Python :: 3.10',
92 'Programming Language :: Python :: Implementation :: CPython',
93 'Programming Language :: Python :: Implementation :: PyPy',
94 'Topic :: Internet :: WWW/HTTP',
95 'Topic :: Software Development :: Libraries :: Application Frameworks',
96 'Topic :: Software Development :: Libraries :: Python Modules',
97 ],
98 python_requires='>=3.6',
99 install_requires=install_requires,
100 extras_require=extras_require,
101 )
102
```
Path: `scrapy/__init__.py`
Content:
```
1 """
2 Scrapy - a web crawling and web scraping framework written for Python
3 """
4
5 import pkgutil
6 import sys
7 import warnings
8
9 from twisted import version as _txv
10
11 # Declare top-level shortcuts
12 from scrapy.spiders import Spider
13 from scrapy.http import Request, FormRequest
14 from scrapy.selector import Selector
15 from scrapy.item import Item, Field
16
17
18 __all__ = [
19 '__version__', 'version_info', 'twisted_version', 'Spider',
20 'Request', 'FormRequest', 'Selector', 'Item', 'Field',
21 ]
22
23
24 # Scrapy and Twisted versions
25 __version__ = (pkgutil.get_data(__package__, "VERSION") or b"").decode("ascii").strip()
26 version_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))
27 twisted_version = (_txv.major, _txv.minor, _txv.micro)
28
29
30 # Check minimum required Python version
31 if sys.version_info < (3, 6):
32 print(f"Scrapy {__version__} requires Python 3.6+")
33 sys.exit(1)
34
35
36 # Ignore noisy twisted deprecation warnings
37 warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')
38
39
40 del pkgutil
41 del sys
42 del warnings
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/__init__.py b/scrapy/__init__.py
--- a/scrapy/__init__.py
+++ b/scrapy/__init__.py
@@ -28,8 +28,8 @@
# Check minimum required Python version
-if sys.version_info < (3, 6):
- print(f"Scrapy {__version__} requires Python 3.6+")
+if sys.version_info < (3, 7):
+ print(f"Scrapy {__version__} requires Python 3.7+")
sys.exit(1)
diff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py
deleted file mode 100644
--- a/scrapy/utils/py36.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import warnings
-
-from scrapy.exceptions import ScrapyDeprecationWarning
-from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401
-
-
-warnings.warn(
- "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.",
- category=ScrapyDeprecationWarning,
- stacklevel=2,
-)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,35 +19,29 @@
install_requires = [
- 'Twisted>=17.9.0',
- 'cryptography>=2.0',
+ 'Twisted>=18.9.0',
+ 'cryptography>=2.8',
'cssselect>=0.9.1',
'itemloaders>=1.0.1',
'parsel>=1.5.0',
- 'pyOpenSSL>=16.2.0',
+ 'pyOpenSSL>=19.1.0',
'queuelib>=1.4.2',
'service_identity>=16.0.0',
'w3lib>=1.17.0',
- 'zope.interface>=4.1.3',
+ 'zope.interface>=5.1.0',
'protego>=0.1.15',
'itemadapter>=0.1.0',
'setuptools',
'tldextract',
+ 'lxml>=4.3.0',
]
extras_require = {}
cpython_dependencies = [
- 'lxml>=3.5.0',
'PyDispatcher>=2.0.5',
]
if has_environment_marker_platform_impl_support():
extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
extras_require[':platform_python_implementation == "PyPy"'] = [
- # Earlier lxml versions are affected by
- # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
- # which was fixed in Cython 0.26, released on 2017-06-19, and used to
- # generate the C headers of lxml release tarballs published since then, the
- # first of which was:
- 'lxml>=4.0.0',
'PyPyDispatcher>=2.1.0',
]
else:
@@ -84,7 +78,6 @@
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
@@ -95,7 +88,7 @@
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
- python_requires='>=3.6',
+ python_requires='>=3.7',
install_requires=install_requires,
extras_require=extras_require,
)
|
{"golden_diff": "diff --git a/scrapy/__init__.py b/scrapy/__init__.py\n--- a/scrapy/__init__.py\n+++ b/scrapy/__init__.py\n@@ -28,8 +28,8 @@\n \n \n # Check minimum required Python version\n-if sys.version_info < (3, 6):\n- print(f\"Scrapy {__version__} requires Python 3.6+\")\n+if sys.version_info < (3, 7):\n+ print(f\"Scrapy {__version__} requires Python 3.7+\")\n sys.exit(1)\n \n \ndiff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py\ndeleted file mode 100644\n--- a/scrapy/utils/py36.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-import warnings\n-\n-from scrapy.exceptions import ScrapyDeprecationWarning\n-from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n-\n-\n-warnings.warn(\n- \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n- category=ScrapyDeprecationWarning,\n- stacklevel=2,\n-)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,35 +19,29 @@\n \n \n install_requires = [\n- 'Twisted>=17.9.0',\n- 'cryptography>=2.0',\n+ 'Twisted>=18.9.0',\n+ 'cryptography>=2.8',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n- 'pyOpenSSL>=16.2.0',\n+ 'pyOpenSSL>=19.1.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n- 'zope.interface>=4.1.3',\n+ 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n+ 'lxml>=4.3.0',\n ]\n extras_require = {}\n cpython_dependencies = [\n- 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n ]\n if has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n- # Earlier lxml versions are affected by\n- # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n- # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n- # generate the C headers of lxml release tarballs published since then, the\n- # first of which was:\n- 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\n else:\n@@ -84,7 +78,6 @@\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n@@ -95,7 +88,7 @@\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n- python_requires='>=3.6',\n+ python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n )\n", "issue": "Drop Python 3.6 support\n[It went end-of-life on December 2021](https://endoflife.date/python).\n", "before_files": [{"content": "import warnings\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n\n\nwarnings.warn(\n \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2,\n)\n", "path": "scrapy/utils/py36.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n 
https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and web scraping framework written for Python\n\"\"\"\n\nimport pkgutil\nimport sys\nimport warnings\n\nfrom twisted import version as _txv\n\n# Declare top-level shortcuts\nfrom scrapy.spiders import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\n\n__all__ = [\n '__version__', 'version_info', 'twisted_version', 'Spider',\n 'Request', 'FormRequest', 'Selector', 'Item', 'Field',\n]\n\n\n# Scrapy and Twisted versions\n__version__ = (pkgutil.get_data(__package__, \"VERSION\") or 
b\"\").decode(\"ascii\").strip()\nversion_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\n\n\n# Check minimum required Python version\nif sys.version_info < (3, 6):\n print(f\"Scrapy {__version__} requires Python 3.6+\")\n sys.exit(1)\n\n\n# Ignore noisy twisted deprecation warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\n\n\ndel pkgutil\ndel sys\ndel warnings\n", "path": "scrapy/__init__.py"}], "after_files": [{"content": null, "path": "scrapy/utils/py36.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=18.9.0',\n 'cryptography>=2.8',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=19.1.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n 'lxml>=4.3.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.7',\n install_requires=install_requires,\n 
extras_require=extras_require,\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and web scraping framework written for Python\n\"\"\"\n\nimport pkgutil\nimport sys\nimport warnings\n\nfrom twisted import version as _txv\n\n# Declare top-level shortcuts\nfrom scrapy.spiders import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\n\n__all__ = [\n '__version__', 'version_info', 'twisted_version', 'Spider',\n 'Request', 'FormRequest', 'Selector', 'Item', 'Field',\n]\n\n\n# Scrapy and Twisted versions\n__version__ = (pkgutil.get_data(__package__, \"VERSION\") or b\"\").decode(\"ascii\").strip()\nversion_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\n\n\n# Check minimum required Python version\nif sys.version_info < (3, 7):\n print(f\"Scrapy {__version__} requires Python 3.7+\")\n sys.exit(1)\n\n\n# Ignore noisy twisted deprecation warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\n\n\ndel pkgutil\ndel sys\ndel warnings\n", "path": "scrapy/__init__.py"}]}
| 1,894 | 887 |
gh_patches_debug_10492
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-2532
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Kubernetes Deployment Info shown on heroku
Do not try to show Kubernetes deployment info when not hosted on Kubernetes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/views/admin/super_admin/super_admin.py`
Content:
```
1 import os
2
3 from flask_admin import expose
4
5 from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView
6 from ....helpers.data_getter import DataGetter
7 from app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count
8 from app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER
9 from app.helpers.kubernetes import KubernetesApi
10
11
12 class SuperAdminView(SuperAdminBaseView):
13
14 @expose('/')
15 def index_view(self):
16 events = DataGetter.get_all_events()[:5]
17 number_live_events = get_count(DataGetter.get_all_live_events())
18 number_draft_events = get_count(DataGetter.get_all_draft_events())
19 number_past_events = get_count(DataGetter.get_all_past_events())
20 super_admins = DataGetter.get_all_super_admins()
21 admins = DataGetter.get_all_admins()
22 registered_users = DataGetter.get_all_registered_users()
23 # TODO Fix function and correct this
24 organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))
25 co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))
26 track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))
27 attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))
28 accepted_sessions = DataGetter.get_all_accepted_sessions()
29 rejected_sessions = DataGetter.get_all_rejected_sessions()
30 draft_sessions = DataGetter.get_all_draft_sessions()
31 email_times = DataGetter.get_email_by_times()
32
33 commit_info = None
34 version = None
35 on_kubernetes = False
36 pods_info = None
37
38 if KubernetesApi.is_on_kubernetes():
39 on_kubernetes = True
40 kubernetes_api = KubernetesApi()
41 pods_info = kubernetes_api.get_pods()['items']
42 version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')
43 commit_info = os.getenv('BRANCH', 'development')
44 else:
45 version = get_latest_heroku_release()
46 commit_info = None
47 commit_number = None
48 if version:
49 commit_number = version['description'].split(' ')[1]
50 commit_info = get_commit_info(commit_number)
51
52 return self.render('/gentelella/admin/super_admin/widgets/index.html',
53 events=events,
54 version=version,
55 commit_info=commit_info,
56 on_kubernetes=True,
57 pods_info=pods_info,
58 number_live_events=number_live_events,
59 number_draft_events=number_draft_events,
60 number_past_events=number_past_events,
61 super_admins=super_admins,
62 admins=admins,
63 registered_users=registered_users,
64 organizers=organizers,
65 co_organizers=co_organizers,
66 track_organizers=track_organizers,
67 attendees=attendees,
68 accepted_sessions=accepted_sessions,
69 rejected_sessions=rejected_sessions,
70 draft_sessions=draft_sessions,
71 email_times=email_times)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py
--- a/app/views/admin/super_admin/super_admin.py
+++ b/app/views/admin/super_admin/super_admin.py
@@ -53,7 +53,7 @@
events=events,
version=version,
commit_info=commit_info,
- on_kubernetes=True,
+ on_kubernetes=on_kubernetes,
pods_info=pods_info,
number_live_events=number_live_events,
number_draft_events=number_draft_events,
|
{"golden_diff": "diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py\n--- a/app/views/admin/super_admin/super_admin.py\n+++ b/app/views/admin/super_admin/super_admin.py\n@@ -53,7 +53,7 @@\n events=events,\n version=version,\n commit_info=commit_info,\n- on_kubernetes=True,\n+ on_kubernetes=on_kubernetes,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n", "issue": "Bug: Kubernetes Deployment Info shown on heroku\nDo not try to show Kubernetes deployment info when not hosted on Kubernetes\n", "before_files": [{"content": "import os\n\nfrom flask_admin import expose\n\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView\nfrom ....helpers.data_getter import DataGetter\nfrom app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count\nfrom app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER\nfrom app.helpers.kubernetes import KubernetesApi\n\n\nclass SuperAdminView(SuperAdminBaseView):\n\n @expose('/')\n def index_view(self):\n events = DataGetter.get_all_events()[:5]\n number_live_events = get_count(DataGetter.get_all_live_events())\n number_draft_events = get_count(DataGetter.get_all_draft_events())\n number_past_events = get_count(DataGetter.get_all_past_events())\n super_admins = DataGetter.get_all_super_admins()\n admins = DataGetter.get_all_admins()\n registered_users = DataGetter.get_all_registered_users()\n # TODO Fix function and correct this\n organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))\n co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))\n track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))\n attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))\n accepted_sessions = DataGetter.get_all_accepted_sessions()\n rejected_sessions = DataGetter.get_all_rejected_sessions()\n draft_sessions = DataGetter.get_all_draft_sessions()\n email_times = DataGetter.get_email_by_times()\n\n commit_info = None\n version = None\n on_kubernetes = False\n pods_info = None\n\n if KubernetesApi.is_on_kubernetes():\n on_kubernetes = True\n kubernetes_api = KubernetesApi()\n pods_info = kubernetes_api.get_pods()['items']\n version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')\n commit_info = os.getenv('BRANCH', 'development')\n else:\n version = get_latest_heroku_release()\n commit_info = None\n commit_number = None\n if version:\n commit_number = version['description'].split(' ')[1]\n commit_info = get_commit_info(commit_number)\n\n return self.render('/gentelella/admin/super_admin/widgets/index.html',\n events=events,\n version=version,\n commit_info=commit_info,\n on_kubernetes=True,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n number_past_events=number_past_events,\n super_admins=super_admins,\n admins=admins,\n registered_users=registered_users,\n organizers=organizers,\n co_organizers=co_organizers,\n track_organizers=track_organizers,\n attendees=attendees,\n accepted_sessions=accepted_sessions,\n rejected_sessions=rejected_sessions,\n draft_sessions=draft_sessions,\n email_times=email_times)\n", "path": "app/views/admin/super_admin/super_admin.py"}], "after_files": [{"content": "import os\n\nfrom flask_admin import expose\n\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView\nfrom ....helpers.data_getter import DataGetter\nfrom 
app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count\nfrom app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER\nfrom app.helpers.kubernetes import KubernetesApi\n\n\nclass SuperAdminView(SuperAdminBaseView):\n\n @expose('/')\n def index_view(self):\n events = DataGetter.get_all_events()[:5]\n number_live_events = get_count(DataGetter.get_all_live_events())\n number_draft_events = get_count(DataGetter.get_all_draft_events())\n number_past_events = get_count(DataGetter.get_all_past_events())\n super_admins = DataGetter.get_all_super_admins()\n admins = DataGetter.get_all_admins()\n registered_users = DataGetter.get_all_registered_users()\n # TODO Fix function and correct this\n organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))\n co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))\n track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))\n attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))\n accepted_sessions = DataGetter.get_all_accepted_sessions()\n rejected_sessions = DataGetter.get_all_rejected_sessions()\n draft_sessions = DataGetter.get_all_draft_sessions()\n email_times = DataGetter.get_email_by_times()\n\n commit_info = None\n version = None\n on_kubernetes = False\n pods_info = None\n\n if KubernetesApi.is_on_kubernetes():\n on_kubernetes = True\n kubernetes_api = KubernetesApi()\n pods_info = kubernetes_api.get_pods()['items']\n version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')\n commit_info = os.getenv('BRANCH', 'development')\n else:\n version = get_latest_heroku_release()\n commit_info = None\n commit_number = None\n if version:\n commit_number = version['description'].split(' ')[1]\n commit_info = get_commit_info(commit_number)\n\n return self.render('/gentelella/admin/super_admin/widgets/index.html',\n events=events,\n version=version,\n commit_info=commit_info,\n on_kubernetes=on_kubernetes,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n number_past_events=number_past_events,\n super_admins=super_admins,\n admins=admins,\n registered_users=registered_users,\n organizers=organizers,\n co_organizers=co_organizers,\n track_organizers=track_organizers,\n attendees=attendees,\n accepted_sessions=accepted_sessions,\n rejected_sessions=rejected_sessions,\n draft_sessions=draft_sessions,\n email_times=email_times)\n", "path": "app/views/admin/super_admin/super_admin.py"}]}
| 1,059 | 123 |
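The bug in the record above is a hard-coded keyword argument shadowing a computed flag: `on_kubernetes` is calculated correctly, but the literal `True` is what reaches the template. The sketch below is not taken from the Open Event codebase; it is a minimal, self-contained Python illustration (the `render` stand-in and the variable names are invented for the example) of why the Kubernetes panel shows up even when the environment check fails.

```python
# Minimal illustration of the "literal argument shadows computed flag" bug pattern.

def render(template_name, **context):
    # Stand-in for Flask-Admin's self.render(); simply echoes the context it receives.
    return context

def index_view_buggy(running_on_kubernetes):
    on_kubernetes = running_on_kubernetes          # flag computed from the environment
    # Bug: the literal True is forwarded, so the template always shows the Kubernetes panel.
    return render("index.html", on_kubernetes=True)

def index_view_fixed(running_on_kubernetes):
    on_kubernetes = running_on_kubernetes
    # Fix: forward the computed flag, exactly as the golden diff does.
    return render("index.html", on_kubernetes=on_kubernetes)

if __name__ == "__main__":
    assert index_view_buggy(False)["on_kubernetes"] is True    # wrong on Heroku
    assert index_view_fixed(False)["on_kubernetes"] is False   # correct on Heroku
    assert index_view_fixed(True)["on_kubernetes"] is True     # still correct on Kubernetes
```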
gh_patches_debug_9975
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-852
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
# -*- coding: utf-8 -*- is not consistently used in all Python source files
- Cookiecutter version:
Cookiecutter 1.4.0
- Template project url:
none
- Python version:
3.5
- Operating System:
OS X
### Description:
# -_\- coding: utf-8 -_\- is not consistently used in all Python source files
### What I've run:
Nothing, it's an observation of an inconsistency in the project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import io
5 import sys
6
7 from setuptools import setup
8
9 version = "1.4.0"
10
11 if sys.argv[-1] == 'publish':
12 os.system('python setup.py sdist upload')
13 os.system('python setup.py bdist_wheel upload')
14 sys.exit()
15
16 if sys.argv[-1] == 'tag':
17 os.system("git tag -a %s -m 'version %s'" % (version, version))
18 os.system("git push --tags")
19 sys.exit()
20
21 with io.open('README.rst', 'r', encoding='utf-8') as readme_file:
22 readme = readme_file.read()
23
24 with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:
25 history = history_file.read().replace('.. :changelog:', '')
26
27 requirements = [
28 'future>=0.15.2',
29 'binaryornot>=0.2.0',
30 'jinja2>=2.7',
31 'click>=5.0',
32 'whichcraft>=0.4.0',
33 'poyo>=0.1.0',
34 'jinja2-time>=0.1.0'
35 ]
36
37 long_description = readme + '\n\n' + history
38
39 if sys.argv[-1] == 'readme':
40 print(long_description)
41 sys.exit()
42
43
44 setup(
45 name='cookiecutter',
46 version=version,
47 description=('A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'),
50 long_description=long_description,
51 author='Audrey Roy',
52 author_email='[email protected]',
53 url='https://github.com/audreyr/cookiecutter',
54 packages=[
55 'cookiecutter',
56 ],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={
59 'console_scripts': [
60 'cookiecutter = cookiecutter.__main__:main',
61 ]
62 },
63 include_package_data=True,
64 install_requires=requirements,
65 license='BSD',
66 zip_safe=False,
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Environment :: Console',
70 'Intended Audience :: Developers',
71 'Natural Language :: English',
72 'License :: OSI Approved :: BSD License',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 2',
75 'Programming Language :: Python :: 2.7',
76 'Programming Language :: Python :: 3',
77 'Programming Language :: Python :: 3.3',
78 'Programming Language :: Python :: 3.4',
79 'Programming Language :: Python :: 3.5',
80 'Programming Language :: Python :: Implementation :: CPython',
81 'Programming Language :: Python :: Implementation :: PyPy',
82 'Topic :: Software Development',
83 ],
84 keywords=(
85 'cookiecutter, Python, projects, project templates, Jinja2, '
86 'skeleton, scaffolding, project directory, setup.py, package, '
87 'packaging'
88 ),
89 )
90
```
Path: `cookiecutter/__main__.py`
Content:
```
1 """Allow cookiecutter to be executable through `python -m cookiecutter`."""
2 from __future__ import absolute_import
3
4 from .cli import main
5
6
7 if __name__ == "__main__": # pragma: no cover
8 main(prog_name="cookiecutter")
9
```
Path: `__main__.py`
Content:
```
1 """Allow cookiecutter to be executable from a checkout or zip file."""
2 import runpy
3
4
5 if __name__ == "__main__":
6 runpy.run_module("cookiecutter", run_name="__main__")
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/__main__.py b/__main__.py
--- a/__main__.py
+++ b/__main__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""Allow cookiecutter to be executable from a checkout or zip file."""
import runpy
diff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py
--- a/cookiecutter/__main__.py
+++ b/cookiecutter/__main__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""Allow cookiecutter to be executable through `python -m cookiecutter`."""
from __future__ import absolute_import
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
import os
import io
|
{"golden_diff": "diff --git a/__main__.py b/__main__.py\n--- a/__main__.py\n+++ b/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\n import runpy\n \ndiff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py\n--- a/cookiecutter/__main__.py\n+++ b/cookiecutter/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\n from __future__ import absolute_import\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n #!/usr/bin/env python\n+# -*- coding: utf-8 -*-\n \n import os\n import io\n", "issue": "# -*- coding: utf-8 -*- is not consistently used in all Python source files\n- Cookiecutter version:\n Cookiecutter 1.4.0\n- Template project url:\n none\n- Python version:\n 3.5\n- Operating System:\n OS X\n ### Description:\n # -_\\- coding: utf-8 -_\\- is not consistently used in all Python source files\n### What I've run:\n\nNothing, it's an observation of an inconsistency in the project.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.4.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "\"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom .cli import main\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(prog_name=\"cookiecutter\")\n", "path": "cookiecutter/__main__.py"}, {"content": "\"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\nimport runpy\n\n\nif __name__ == \"__main__\":\n runpy.run_module(\"cookiecutter\", run_name=\"__main__\")\n", "path": "__main__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.4.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom .cli import main\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(prog_name=\"cookiecutter\")\n", "path": "cookiecutter/__main__.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\nimport runpy\n\n\nif __name__ == \"__main__\":\n runpy.run_module(\"cookiecutter\", run_name=\"__main__\")\n", "path": "__main__.py"}]}
| 1,353 | 205 |
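The fix in the record above simply prepends a PEP 263 encoding declaration to three files. Below is a hedged sketch of how such an inconsistency could be found mechanically; the root path, regex, and report format are assumptions made for illustration and the script is not part of cookiecutter itself. PEP 263 only recognises the declaration on the first or second line of a file, which is what the check mirrors.

```python
# Report Python files whose first two lines lack a PEP 263 coding declaration.
import re
from pathlib import Path

# Close to the pattern PEP 263 specifies: a comment containing coding[:=] <name>.
CODING_RE = re.compile(r"^[ \t\f]*#.*?coding[:=]\s*([-\w.]+)")

def files_missing_coding(root="."):
    missing = []
    for path in sorted(Path(root).rglob("*.py")):
        # PEP 263: the declaration must appear on line 1 or line 2.
        head = path.read_text(encoding="utf-8", errors="replace").splitlines()[:2]
        if not any(CODING_RE.search(line) for line in head):
            missing.append(path)
    return missing

if __name__ == "__main__":
    for path in files_missing_coding():
        print(f"missing coding declaration: {path}")
```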
gh_patches_debug_50127
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4481
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
responsetypes.py ResponseTypes.from_headers typo as 'Content-type'
Shall this line:
https://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74
be changed to below?
`
content_type=headers[b'Content-Type'],
`
I guess this typo is with all the versions until today.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/responsetypes.py`
Content:
```
1 """
2 This module implements a class which returns the appropriate Response class
3 based on different criteria.
4 """
5 from mimetypes import MimeTypes
6 from pkgutil import get_data
7 from io import StringIO
8
9 from scrapy.http import Response
10 from scrapy.utils.misc import load_object
11 from scrapy.utils.python import binary_is_text, to_bytes, to_unicode
12
13
14 class ResponseTypes:
15
16 CLASSES = {
17 'text/html': 'scrapy.http.HtmlResponse',
18 'application/atom+xml': 'scrapy.http.XmlResponse',
19 'application/rdf+xml': 'scrapy.http.XmlResponse',
20 'application/rss+xml': 'scrapy.http.XmlResponse',
21 'application/xhtml+xml': 'scrapy.http.HtmlResponse',
22 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
23 'application/xml': 'scrapy.http.XmlResponse',
24 'application/json': 'scrapy.http.TextResponse',
25 'application/x-json': 'scrapy.http.TextResponse',
26 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',
27 'application/javascript': 'scrapy.http.TextResponse',
28 'application/x-javascript': 'scrapy.http.TextResponse',
29 'text/xml': 'scrapy.http.XmlResponse',
30 'text/*': 'scrapy.http.TextResponse',
31 }
32
33 def __init__(self):
34 self.classes = {}
35 self.mimetypes = MimeTypes()
36 mimedata = get_data('scrapy', 'mime.types').decode('utf8')
37 self.mimetypes.readfp(StringIO(mimedata))
38 for mimetype, cls in self.CLASSES.items():
39 self.classes[mimetype] = load_object(cls)
40
41 def from_mimetype(self, mimetype):
42 """Return the most appropriate Response class for the given mimetype"""
43 if mimetype is None:
44 return Response
45 elif mimetype in self.classes:
46 return self.classes[mimetype]
47 else:
48 basetype = "%s/*" % mimetype.split('/')[0]
49 return self.classes.get(basetype, Response)
50
51 def from_content_type(self, content_type, content_encoding=None):
52 """Return the most appropriate Response class from an HTTP Content-Type
53 header """
54 if content_encoding:
55 return Response
56 mimetype = to_unicode(content_type).split(';')[0].strip().lower()
57 return self.from_mimetype(mimetype)
58
59 def from_content_disposition(self, content_disposition):
60 try:
61 filename = to_unicode(content_disposition,
62 encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
63 filename = filename.strip('"\'')
64 return self.from_filename(filename)
65 except IndexError:
66 return Response
67
68 def from_headers(self, headers):
69 """Return the most appropriate Response class by looking at the HTTP
70 headers"""
71 cls = Response
72 if b'Content-Type' in headers:
73 cls = self.from_content_type(
74 content_type=headers[b'Content-type'],
75 content_encoding=headers.get(b'Content-Encoding')
76 )
77 if cls is Response and b'Content-Disposition' in headers:
78 cls = self.from_content_disposition(headers[b'Content-Disposition'])
79 return cls
80
81 def from_filename(self, filename):
82 """Return the most appropriate Response class from a file name"""
83 mimetype, encoding = self.mimetypes.guess_type(filename)
84 if mimetype and not encoding:
85 return self.from_mimetype(mimetype)
86 else:
87 return Response
88
89 def from_body(self, body):
90 """Try to guess the appropriate response based on the body content.
91 This method is a bit magic and could be improved in the future, but
92 it's not meant to be used except for special cases where response types
93 cannot be guess using more straightforward methods."""
94 chunk = body[:5000]
95 chunk = to_bytes(chunk)
96 if not binary_is_text(chunk):
97 return self.from_mimetype('application/octet-stream')
98 elif b"<html>" in chunk.lower():
99 return self.from_mimetype('text/html')
100 elif b"<?xml" in chunk.lower():
101 return self.from_mimetype('text/xml')
102 else:
103 return self.from_mimetype('text')
104
105 def from_args(self, headers=None, url=None, filename=None, body=None):
106 """Guess the most appropriate Response class based on
107 the given arguments."""
108 cls = Response
109 if headers is not None:
110 cls = self.from_headers(headers)
111 if cls is Response and url is not None:
112 cls = self.from_filename(url)
113 if cls is Response and filename is not None:
114 cls = self.from_filename(filename)
115 if cls is Response and body is not None:
116 cls = self.from_body(body)
117 return cls
118
119
120 responsetypes = ResponseTypes()
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py
--- a/scrapy/responsetypes.py
+++ b/scrapy/responsetypes.py
@@ -71,7 +71,7 @@
cls = Response
if b'Content-Type' in headers:
cls = self.from_content_type(
- content_type=headers[b'Content-type'],
+ content_type=headers[b'Content-Type'],
content_encoding=headers.get(b'Content-Encoding')
)
if cls is Response and b'Content-Disposition' in headers:
|
{"golden_diff": "diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py\n--- a/scrapy/responsetypes.py\n+++ b/scrapy/responsetypes.py\n@@ -71,7 +71,7 @@\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n- content_type=headers[b'Content-type'],\n+ content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n", "issue": "responsetypes.py ResponseTypes.from_headers typo as 'Content-type'\nShall this line:\r\n\r\nhttps://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74\r\n\r\nbe changed to below?\r\n`\r\ncontent_type=headers[b'Content-Type'],\r\n`\r\nI guess this typo is with all the versions until today.\n", "before_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import binary_is_text, to_bytes, to_unicode\n\n\nclass ResponseTypes:\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in self.CLASSES.items():\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_unicode(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_unicode(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = 
self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}], "after_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import binary_is_text, to_bytes, to_unicode\n\n\nclass ResponseTypes:\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in self.CLASSES.items():\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = 
to_unicode(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_unicode(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}]}
| 1,617 | 121 |
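The spelling fix above is latent rather than behaviour-changing because Scrapy's `Headers` mapping normalises key case, so `headers[b'Content-type']` and `headers[b'Content-Type']` resolve to the same entry; the patch makes the capitalisation consistent with the membership test two lines earlier. The toy mapping below is not Scrapy's actual implementation, only a small demonstration of why the typo never raised a `KeyError`.

```python
# A toy case-insensitive header mapping; Scrapy's real Headers class is more involved,
# but the key-normalisation idea is the same.
class CaselessHeaders(dict):
    @staticmethod
    def _norm(key):
        raw = key if isinstance(key, bytes) else key.encode()
        return raw.title()                      # b'content-type' -> b'Content-Type'

    def __setitem__(self, key, value):
        super().__setitem__(self._norm(key), value)

    def __getitem__(self, key):
        return super().__getitem__(self._norm(key))

    def __contains__(self, key):
        return super().__contains__(self._norm(key))

if __name__ == "__main__":
    headers = CaselessHeaders()
    headers[b"Content-Type"] = b"text/html; charset=utf-8"
    # Both spellings hit the same normalised key, which is why the typo stayed hidden.
    assert headers[b"Content-type"] == headers[b"Content-Type"]
    assert b"content-TYPE" in headers
```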
gh_patches_debug_11555
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-753
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setuptools doesn't play nice with Numpy
See: https://github.com/numpy/numpy/blob/master/numpy/distutils/extension.py#L42-L52
That functionality was broken by:
https://github.com/pypa/setuptools/blob/master/setuptools/extension.py#L39-L41
In this pr: https://github.com/pypa/setuptools/pull/718
Here's the the pdb session where I debugged this:
```
(Pdb) old_Extension.__module__
'setuptools.extension'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/extension.py`
Content:
```
1 import sys
2 import re
3 import functools
4 import distutils.core
5 import distutils.errors
6 import distutils.extension
7
8 from setuptools.extern.six.moves import map
9
10 from .dist import _get_unpatched
11 from . import msvc
12
13 _Extension = _get_unpatched(distutils.core.Extension)
14
15 msvc.patch_for_specialized_compiler()
16
17
18 def _have_cython():
19 """
20 Return True if Cython can be imported.
21 """
22 cython_impl = 'Cython.Distutils.build_ext'
23 try:
24 # from (cython_impl) import build_ext
25 __import__(cython_impl, fromlist=['build_ext']).build_ext
26 return True
27 except Exception:
28 pass
29 return False
30
31
32 # for compatibility
33 have_pyrex = _have_cython
34
35
36 class Extension(_Extension):
37 """Extension that uses '.c' files in place of '.pyx' files"""
38
39 def __init__(self, name, sources, py_limited_api=False, **kw):
40 self.py_limited_api = py_limited_api
41 _Extension.__init__(self, name, sources, **kw)
42
43 def _convert_pyx_sources_to_lang(self):
44 """
45 Replace sources with .pyx extensions to sources with the target
46 language extension. This mechanism allows language authors to supply
47 pre-converted sources but to prefer the .pyx sources.
48 """
49 if _have_cython():
50 # the build has Cython, so allow it to compile the .pyx files
51 return
52 lang = self.language or ''
53 target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
54 sub = functools.partial(re.sub, '.pyx$', target_ext)
55 self.sources = list(map(sub, self.sources))
56
57
58 class Library(Extension):
59 """Just like a regular Extension, but built as a library instead"""
60
61
62 distutils.core.Extension = Extension
63 distutils.extension.Extension = Extension
64 if 'distutils.command.build_ext' in sys.modules:
65 sys.modules['distutils.command.build_ext'].Extension = Extension
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setuptools/extension.py b/setuptools/extension.py
--- a/setuptools/extension.py
+++ b/setuptools/extension.py
@@ -36,9 +36,11 @@
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
- def __init__(self, name, sources, py_limited_api=False, **kw):
- self.py_limited_api = py_limited_api
- _Extension.__init__(self, name, sources, **kw)
+ def __init__(self, name, sources, *args, **kw):
+ # The *args is needed for compatibility as calls may use positional
+ # arguments. py_limited_api may be set only via keyword.
+ self.py_limited_api = kw.pop("py_limited_api", False)
+ _Extension.__init__(self, name, sources, *args, **kw)
def _convert_pyx_sources_to_lang(self):
"""
|
{"golden_diff": "diff --git a/setuptools/extension.py b/setuptools/extension.py\n--- a/setuptools/extension.py\n+++ b/setuptools/extension.py\n@@ -36,9 +36,11 @@\n class Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n \n- def __init__(self, name, sources, py_limited_api=False, **kw):\n- self.py_limited_api = py_limited_api\n- _Extension.__init__(self, name, sources, **kw)\n+ def __init__(self, name, sources, *args, **kw):\n+ # The *args is needed for compatibility as calls may use positional\n+ # arguments. py_limited_api may be set only via keyword.\n+ self.py_limited_api = kw.pop(\"py_limited_api\", False)\n+ _Extension.__init__(self, name, sources, *args, **kw)\n \n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n", "issue": "Setuptools doesn't play nice with Numpy\nSee: https://github.com/numpy/numpy/blob/master/numpy/distutils/extension.py#L42-L52\n\nThat functionality was broken by:\nhttps://github.com/pypa/setuptools/blob/master/setuptools/extension.py#L39-L41\n\nIn this pr: https://github.com/pypa/setuptools/pull/718\n\nHere's the the pdb session where I debugged this:\n\n```\n(Pdb) old_Extension.__module__\n'setuptools.extension'\n```\n\n", "before_files": [{"content": "import sys\nimport re\nimport functools\nimport distutils.core\nimport distutils.errors\nimport distutils.extension\n\nfrom setuptools.extern.six.moves import map\n\nfrom .dist import _get_unpatched\nfrom . import msvc\n\n_Extension = _get_unpatched(distutils.core.Extension)\n\nmsvc.patch_for_specialized_compiler()\n\n\ndef _have_cython():\n \"\"\"\n Return True if Cython can be imported.\n \"\"\"\n cython_impl = 'Cython.Distutils.build_ext'\n try:\n # from (cython_impl) import build_ext\n __import__(cython_impl, fromlist=['build_ext']).build_ext\n return True\n except Exception:\n pass\n return False\n\n\n# for compatibility\nhave_pyrex = _have_cython\n\n\nclass Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n\n def __init__(self, name, sources, py_limited_api=False, **kw):\n self.py_limited_api = py_limited_api\n _Extension.__init__(self, name, sources, **kw)\n\n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n Replace sources with .pyx extensions to sources with the target\n language extension. This mechanism allows language authors to supply\n pre-converted sources but to prefer the .pyx sources.\n \"\"\"\n if _have_cython():\n # the build has Cython, so allow it to compile the .pyx files\n return\n lang = self.language or ''\n target_ext = '.cpp' if lang.lower() == 'c++' else '.c'\n sub = functools.partial(re.sub, '.pyx$', target_ext)\n self.sources = list(map(sub, self.sources))\n\n\nclass Library(Extension):\n \"\"\"Just like a regular Extension, but built as a library instead\"\"\"\n\n\ndistutils.core.Extension = Extension\ndistutils.extension.Extension = Extension\nif 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = Extension\n", "path": "setuptools/extension.py"}], "after_files": [{"content": "import sys\nimport re\nimport functools\nimport distutils.core\nimport distutils.errors\nimport distutils.extension\n\nfrom setuptools.extern.six.moves import map\n\nfrom .dist import _get_unpatched\nfrom . 
import msvc\n\n_Extension = _get_unpatched(distutils.core.Extension)\n\nmsvc.patch_for_specialized_compiler()\n\n\ndef _have_cython():\n \"\"\"\n Return True if Cython can be imported.\n \"\"\"\n cython_impl = 'Cython.Distutils.build_ext'\n try:\n # from (cython_impl) import build_ext\n __import__(cython_impl, fromlist=['build_ext']).build_ext\n return True\n except Exception:\n pass\n return False\n\n\n# for compatibility\nhave_pyrex = _have_cython\n\n\nclass Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n\n def __init__(self, name, sources, *args, **kw):\n # The *args is needed for compatibility as calls may use positional\n # arguments. py_limited_api may be set only via keyword.\n self.py_limited_api = kw.pop(\"py_limited_api\", False)\n _Extension.__init__(self, name, sources, *args, **kw)\n\n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n Replace sources with .pyx extensions to sources with the target\n language extension. This mechanism allows language authors to supply\n pre-converted sources but to prefer the .pyx sources.\n \"\"\"\n if _have_cython():\n # the build has Cython, so allow it to compile the .pyx files\n return\n lang = self.language or ''\n target_ext = '.cpp' if lang.lower() == 'c++' else '.c'\n sub = functools.partial(re.sub, '.pyx$', target_ext)\n self.sources = list(map(sub, self.sources))\n\n\nclass Library(Extension):\n \"\"\"Just like a regular Extension, but built as a library instead\"\"\"\n\n\ndistutils.core.Extension = Extension\ndistutils.extension.Extension = Extension\nif 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = Extension\n", "path": "setuptools/extension.py"}]}
| 941 | 219 |
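The breakage described above comes from inserting a defaulted parameter into an `__init__` that downstream code calls positionally: the numpy.distutils lines linked in the issue forward a long positional argument list, so the third positional argument lands in `py_limited_api` instead of where the caller expects it. The snippet below reproduces the pattern with invented class names rather than the real `Extension` hierarchy, and shows why the `*args` plus `kw.pop(...)` form in the golden diff keeps positional callers working.

```python
# Why inserting a defaulted parameter breaks positional callers, and how *args/kw.pop avoids it.

class Base:
    def __init__(self, name, sources, include_dirs=None):
        self.name, self.sources, self.include_dirs = name, sources, include_dirs

class Broken(Base):
    # The new parameter sits exactly where positional callers expect include_dirs.
    def __init__(self, name, sources, py_limited_api=False, **kw):
        self.py_limited_api = py_limited_api
        super().__init__(name, sources, **kw)

class Compatible(Base):
    # Positional arguments pass straight through; the new option is keyword-only in practice.
    def __init__(self, name, sources, *args, **kw):
        self.py_limited_api = kw.pop("py_limited_api", False)
        super().__init__(name, sources, *args, **kw)

if __name__ == "__main__":
    broken = Broken("ext", ["ext.c"], ["/usr/include"])        # positional call shape
    assert broken.py_limited_api == ["/usr/include"]           # silently misassigned
    assert broken.include_dirs is None                         # and the real target is lost
    ok = Compatible("ext", ["ext.c"], ["/usr/include"])
    assert ok.include_dirs == ["/usr/include"] and ok.py_limited_api is False
```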
gh_patches_debug_22181
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
order_expiry_time migration error
**Describe the bug**
The latest migration, `2c7ff9781032` had a bug where the server_default was supposed to be a string.
**Additional context**
Working on it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/2c7ff9781032_.py`
Content:
```
1 """empty message
2
3 Revision ID: 2c7ff9781032
4 Revises: b08a4ffff5dd
5 Create Date: 2019-06-21 04:33:59.021835
6
7 """
8
9 from alembic import op
10 import sqlalchemy as sa
11 import sqlalchemy_utils
12
13
14 # revision identifiers, used by Alembic.
15 revision = '2c7ff9781032'
16 down_revision = 'b08a4ffff5dd'
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.drop_column('events', 'order_expiry_time')
22 op.drop_column('events_version', 'order_expiry_time')
23 op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 op.drop_column('settings', 'order_expiry_time')
30 op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
31 op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
32 # ### end Alembic commands ###
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/migrations/versions/2c7ff9781032_.py b/migrations/versions/2c7ff9781032_.py
--- a/migrations/versions/2c7ff9781032_.py
+++ b/migrations/versions/2c7ff9781032_.py
@@ -20,13 +20,13 @@
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('events', 'order_expiry_time')
op.drop_column('events_version', 'order_expiry_time')
- op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))
+ op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('settings', 'order_expiry_time')
- op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
- op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
+ op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))
+ op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
{"golden_diff": "diff --git a/migrations/versions/2c7ff9781032_.py b/migrations/versions/2c7ff9781032_.py\n--- a/migrations/versions/2c7ff9781032_.py\n+++ b/migrations/versions/2c7ff9781032_.py\n@@ -20,13 +20,13 @@\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n- op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))\n+ op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))\n # ### end Alembic commands ###\n \n \n def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'order_expiry_time')\n- op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n- op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n+ op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n+ op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "issue": "order_expiry_time migration error\n**Describe the bug**\r\nThe latest migration, `2c7ff9781032` had a bug where the server_default was supposed to be a string. \r\n\r\n\r\n**Additional context**\r\nWorking on it.\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: 2c7ff9781032\nRevises: b08a4ffff5dd\nCreate Date: 2019-06-21 04:33:59.021835\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c7ff9781032'\ndown_revision = 'b08a4ffff5dd'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'order_expiry_time')\n op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n", "path": "migrations/versions/2c7ff9781032_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: 2c7ff9781032\nRevises: b08a4ffff5dd\nCreate Date: 2019-06-21 04:33:59.021835\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c7ff9781032'\ndown_revision = 'b08a4ffff5dd'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('settings', 'order_expiry_time')\n op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "path": "migrations/versions/2c7ff9781032_.py"}]}
| 674 | 342 |
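The one-line cause in the record above is the distinction between SQLAlchemy's Python-side `default`, which never reaches the database, and `server_default`, which is rendered into the `CREATE TABLE` / `ALTER TABLE` DDL and therefore has to be given as text (or a SQL expression). The sketch below assumes SQLAlchemy is installed and uses a throwaway table name; it is only meant to make the DDL difference visible, not to mirror the project's schema.

```python
# default= is applied by SQLAlchemy at INSERT time; server_default= becomes part of the schema.
import sqlalchemy as sa
from sqlalchemy.schema import CreateTable

metadata = sa.MetaData()

settings_demo = sa.Table(
    "settings_demo", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("client_side", sa.Integer, default=15),                         # not in the DDL
    sa.Column("in_schema", sa.Integer, server_default="15", nullable=False),  # in the DDL
)

if __name__ == "__main__":
    ddl = str(CreateTable(settings_demo))
    print(ddl)
    assert "DEFAULT '15'" in ddl                       # only server_default shows up here
    assert "client_side INTEGER DEFAULT" not in ddl    # the Python-side default does not
```

The server-side default also matters for the `nullable=False` part of this migration: adding a NOT NULL column to a table that already has rows needs a database-level default to backfill those rows, which a Python-side `default` cannot provide.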
gh_patches_debug_40616
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-1248
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`experimental` decorator adds extra indents to docs
An extra indent appears in the docs when the [`experimental`](https://github.com/optuna/optuna/blob/2d5f24b06eed56ece72b8dfa878135bb4bb63779/optuna/_experimental.py#L60) decorator is added to a method of a class. This does not happen with a top level (i.e., not nested) class or function.
## Steps to reproduce
1. add an `experimental` decorator to a method of a class
2. make the document
## Reproducible examples
The following figure shows the docs of [`Study.enqueue_trial`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.enqueue_trial). An extra indent appears after the second paragraph.

## Expected behavior
no extra indent is shown after the second paragraph
For example, the docs of [`Study.get_trials`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.get_trials), which does not have the `experimental` decorator, appears as expected.

The following figure shows another example. The docs of the [`train`](https://optuna.readthedocs.io/en/latest/reference/integration.html#optuna.integration.lightgbm.train) function of LightGBMTuner have no extra indent. Although the function has an `experimental` decorator, it is defined at the top level rather than nested inside a class.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/_experimental.py`
Content:
```
1 import functools
2 import inspect
3 from typing import Any
4 from typing import Callable
5 import warnings
6
7 from optuna.exceptions import ExperimentalWarning
8
9
10 # White spaces of each line are necessary to beautifully rendered documentation.
11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require
12 # another four spaces.
13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """
14
15 .. note::
16 Added in v{ver} as an experimental feature. The interface may change in newer versions
17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.
18 """
19
20
21 def _validate_version(version: str) -> None:
22
23 if not isinstance(version, str) or len(version.split(".")) != 3:
24 raise ValueError(
25 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format(
26 version
27 )
28 )
29
30
31 def experimental(version: str, name: str = None) -> Any:
32 """Decorate class or function as experimental.
33
34 Args:
35 version: The first version that supports the target feature.
36 name: The name of the feature. Defaults to the function or class name. Optional.
37 """
38
39 _validate_version(version)
40
41 def _experimental_wrapper(f: Any) -> Any:
42 # f is either func or class.
43
44 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
45
46 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
47 if func.__doc__ is None:
48 func.__doc__ = ""
49 func.__doc__ += docstring
50
51 # TODO(crcrpar): Annotate this correctly.
52 @functools.wraps(func)
53 def new_func(*args: Any, **kwargs: Any) -> Any:
54 """Wrapped function."""
55
56 warnings.warn(
57 "{} is experimental (supported from v{}). "
58 "The interface can change in the future.".format(
59 name if name is not None else func.__name__, version
60 ),
61 ExperimentalWarning,
62 )
63
64 return func(*args, **kwargs) # type: ignore
65
66 return new_func
67
68 def _experimental_class(cls: Any) -> Any:
69 """Decorates a class as experimental.
70
71 This decorator is supposed to be applied to the experimental class.
72 """
73
74 _original_init = cls.__init__
75
76 @functools.wraps(_original_init)
77 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore
78 warnings.warn(
79 "{} is experimental (supported from v{}). "
80 "The interface can change in the future.".format(
81 name if name is not None else cls.__name__, version
82 ),
83 ExperimentalWarning,
84 )
85
86 _original_init(self, *args, **kwargs)
87
88 cls.__init__ = wrapped_init
89
90 if cls.__doc__ is None:
91 cls.__doc__ = ""
92 cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
93 return cls
94
95 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
96
97 return _experimental_wrapper
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optuna/_experimental.py b/optuna/_experimental.py
--- a/optuna/_experimental.py
+++ b/optuna/_experimental.py
@@ -1,5 +1,6 @@
import functools
import inspect
+import textwrap
from typing import Any
from typing import Callable
import warnings
@@ -7,14 +8,11 @@
from optuna.exceptions import ExperimentalWarning
-# White spaces of each line are necessary to beautifully rendered documentation.
-# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require
-# another four spaces.
-_EXPERIMENTAL_DOCSTRING_TEMPLATE = """
+_EXPERIMENTAL_NOTE_TEMPLATE = """
- .. note::
- Added in v{ver} as an experimental feature. The interface may change in newer versions
- without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.
+.. note::
+ Added in v{ver} as an experimental feature. The interface may change in newer versions
+ without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.
"""
@@ -28,6 +26,10 @@
)
+def _get_docstring_indent(docstring: str) -> str:
+ return docstring.split("\n")[-1] if "\n" in docstring else ""
+
+
def experimental(version: str, name: str = None) -> Any:
"""Decorate class or function as experimental.
@@ -43,16 +45,16 @@
def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
- docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
if func.__doc__ is None:
func.__doc__ = ""
- func.__doc__ += docstring
+
+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)
+ indent = _get_docstring_indent(func.__doc__)
+ func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent
# TODO(crcrpar): Annotate this correctly.
@functools.wraps(func)
def new_func(*args: Any, **kwargs: Any) -> Any:
- """Wrapped function."""
-
warnings.warn(
"{} is experimental (supported from v{}). "
"The interface can change in the future.".format(
@@ -70,7 +72,6 @@
This decorator is supposed to be applied to the experimental class.
"""
-
_original_init = cls.__init__
@functools.wraps(_original_init)
@@ -89,7 +90,11 @@
if cls.__doc__ is None:
cls.__doc__ = ""
- cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
+
+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)
+ indent = _get_docstring_indent(cls.__doc__)
+ cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent
+
return cls
return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
|
{"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -1,5 +1,6 @@\n import functools\n import inspect\n+import textwrap\n from typing import Any\n from typing import Callable\n import warnings\n@@ -7,14 +8,11 @@\n from optuna.exceptions import ExperimentalWarning\n \n \n-# White spaces of each line are necessary to beautifully rendered documentation.\n-# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n-# another four spaces.\n-_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n+_EXPERIMENTAL_NOTE_TEMPLATE = \"\"\"\n \n- .. note::\n- Added in v{ver} as an experimental feature. The interface may change in newer versions\n- without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n+.. note::\n+ Added in v{ver} as an experimental feature. The interface may change in newer versions\n+ without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n \"\"\"\n \n \n@@ -28,6 +26,10 @@\n )\n \n \n+def _get_docstring_indent(docstring: str) -> str:\n+ return docstring.split(\"\\n\")[-1] if \"\\n\" in docstring else \"\"\n+\n+\n def experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n \n@@ -43,16 +45,16 @@\n \n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n \n- docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n- func.__doc__ += docstring\n+\n+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n+ indent = _get_docstring_indent(func.__doc__)\n+ func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent\n \n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n- \"\"\"Wrapped function.\"\"\"\n-\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n@@ -70,7 +72,6 @@\n \n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n-\n _original_init = cls.__init__\n \n @functools.wraps(_original_init)\n@@ -89,7 +90,11 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n+\n+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n+ indent = _get_docstring_indent(cls.__doc__)\n+ cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent\n+\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator adds extra indents to docs\nAn extra indent appears in the docs when the [`experimental`](https://github.com/optuna/optuna/blob/2d5f24b06eed56ece72b8dfa878135bb4bb63779/optuna/_experimental.py#L60) decorator is added to a method of a class. This does not happen with a top level (i.e., not nested) class or function.\r\n\r\n## Steps to reproduce\r\n\r\n1. add an `experimental` decorator to a method of a class\r\n2. make the document\r\n\r\n## Reproducible examples\r\nThe following figure shows the docs of [`Study.enqueue_trial`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.enqueue_trial). 
An extra indent appears after the second paragraph.\r\n\r\n\r\n\r\n## Expected behavior\r\nno extra indent is shown after the second paragraph\r\n\r\nFor example, the docs of [`Study.get_trials`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.get_trials), which does not have the `experimental` decorator, appears as expected.\r\n\r\n\r\n\r\nThe following figure shows another example. The docs of the [`train`](https://optuna.readthedocs.io/en/latest/reference/integration.html#optuna.integration.lightgbm.train) function of LightGBMTuner has no extra indent. Although the function has an `experimental` decorator, it belongs to the top level classes.\r\n\r\n\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). 
\"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}], "after_files": [{"content": "import functools\nimport inspect\nimport textwrap\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n_EXPERIMENTAL_NOTE_TEMPLATE = \"\"\"\n\n.. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef _get_docstring_indent(docstring: str) -> str:\n return docstring.split(\"\\n\")[-1] if \"\\n\" in docstring else \"\"\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n if func.__doc__ is None:\n func.__doc__ = \"\"\n\n note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n indent = _get_docstring_indent(func.__doc__)\n func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n\n note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n indent = _get_docstring_indent(cls.__doc__)\n cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent\n\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]}
| 1,622 | 711 |
gh_patches_debug_14346
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Become a member menu item is missing!
### Describe the bug
Become a member menu item is missing!
### How to reproduce
1. Check staging, the menu item is missing
### Expected behaviour
It should be there
### Screenshots
If applicable, add screenshots to help explain your problem.
### Additional context
Probably this was missed in #2728
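Editorial aside, not part of the original report: judging from the fix shown further down in this entry, apps contribute entries to the menu through a `menu_items()` hook on their `AppConfig`, and the registrations app never got one, hence the missing "Become a member" entry. A minimal sketch of the hook, mirroring that patch:

```python
# Sketch of the menu hook the registrations app is missing; the registry
# presumably merges these entries into the site-wide menu.
from django.apps import AppConfig
from django.urls import reverse


class RegistrationsConfig(AppConfig):
    name = "registrations"

    def menu_items(self):
        return {
            "categories": [{"name": "association", "title": "Association", "key": 1}],
            "items": [
                {
                    "category": "association",
                    "title": "Become a member",
                    "url": reverse("registrations:index"),
                    "key": 6,
                },
            ],
        }
```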
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/apps.py`
Content:
```
1 """Configuration for the newsletters package."""
2 from django.apps import AppConfig
3 from django.utils.translation import gettext_lazy as _
4
5
6 class RegistrationsConfig(AppConfig):
7 """AppConfig for the registrations package."""
8
9 name = "registrations"
10 verbose_name = _("Registrations")
11
12 def ready(self):
13 """Import the signals when the app is ready."""
14 # pylint: disable=unused-import,import-outside-toplevel
15 from . import signals
16 from .payables import register
17
18 register()
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/registrations/apps.py b/website/registrations/apps.py
--- a/website/registrations/apps.py
+++ b/website/registrations/apps.py
@@ -1,5 +1,6 @@
"""Configuration for the newsletters package."""
from django.apps import AppConfig
+from django.urls import reverse
from django.utils.translation import gettext_lazy as _
@@ -16,3 +17,16 @@
from .payables import register
register()
+
+ def menu_items(self):
+ return {
+ "categories": [{"name": "association", "title": "Association", "key": 1}],
+ "items": [
+ {
+ "category": "association",
+ "title": "Become a member",
+ "url": reverse("registrations:index"),
+ "key": 6,
+ },
+ ],
+ }
|
{"golden_diff": "diff --git a/website/registrations/apps.py b/website/registrations/apps.py\n--- a/website/registrations/apps.py\n+++ b/website/registrations/apps.py\n@@ -1,5 +1,6 @@\n \"\"\"Configuration for the newsletters package.\"\"\"\n from django.apps import AppConfig\n+from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n \n \n@@ -16,3 +17,16 @@\n from .payables import register\n \n register()\n+\n+ def menu_items(self):\n+ return {\n+ \"categories\": [{\"name\": \"association\", \"title\": \"Association\", \"key\": 1}],\n+ \"items\": [\n+ {\n+ \"category\": \"association\",\n+ \"title\": \"Become a member\",\n+ \"url\": reverse(\"registrations:index\"),\n+ \"key\": 6,\n+ },\n+ ],\n+ }\n", "issue": "Become a member menu item is missing!\n### Describe the bug\r\nBecome a member menu item is missing!\r\n\r\n### How to reproduce\r\n1. Check staging, the menu item is missing\r\n\r\n\r\n### Expected behaviour\r\nIt should be there\r\n\r\n### Screenshots\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Additional context\r\nProbably this was missed in #2728 \n", "before_files": [{"content": "\"\"\"Configuration for the newsletters package.\"\"\"\nfrom django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass RegistrationsConfig(AppConfig):\n \"\"\"AppConfig for the registrations package.\"\"\"\n\n name = \"registrations\"\n verbose_name = _(\"Registrations\")\n\n def ready(self):\n \"\"\"Import the signals when the app is ready.\"\"\"\n # pylint: disable=unused-import,import-outside-toplevel\n from . import signals\n from .payables import register\n\n register()\n", "path": "website/registrations/apps.py"}], "after_files": [{"content": "\"\"\"Configuration for the newsletters package.\"\"\"\nfrom django.apps import AppConfig\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass RegistrationsConfig(AppConfig):\n \"\"\"AppConfig for the registrations package.\"\"\"\n\n name = \"registrations\"\n verbose_name = _(\"Registrations\")\n\n def ready(self):\n \"\"\"Import the signals when the app is ready.\"\"\"\n # pylint: disable=unused-import,import-outside-toplevel\n from . import signals\n from .payables import register\n\n register()\n\n def menu_items(self):\n return {\n \"categories\": [{\"name\": \"association\", \"title\": \"Association\", \"key\": 1}],\n \"items\": [\n {\n \"category\": \"association\",\n \"title\": \"Become a member\",\n \"url\": reverse(\"registrations:index\"),\n \"key\": 6,\n },\n ],\n }\n", "path": "website/registrations/apps.py"}]}
| 472 | 194 |
gh_patches_debug_13199
|
rasdani/github-patches
|
git_diff
|
Nitrate__Nitrate-671
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace smart_text() with smart_str()
To suppress warning: `RemovedInDjango40Warning: smart_text() is deprecated in favor of smart_str()`
Run the `runserver` command and look for the warning messages in its output.
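Editorial aside, not part of the original report: in the Django versions that emit this warning, `smart_text` is just a deprecated alias of `smart_str`, so the change is mechanical: swap the import and the call sites. A tiny illustration, assuming Django is installed:

```python
# Illustrative only: smart_str performs the same coercion smart_text did.
from django.utils.encoding import smart_str

print(smart_str(42))       # "42"   (int -> str, as smart_text() did)
print(smart_str(b"tcms"))  # "tcms" (bytes are decoded to str, as before)
```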
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/tcms/logs/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.contrib.contenttypes.models import ContentType
5
6 from .models import TCMSLogModel
7
8 from django.utils.encoding import smart_text as smart_unicode
9
10
11 class TCMSLog:
12 """TCMS Log"""
13
14 def __init__(self, model):
15 super().__init__()
16 self.model = model
17
18 def get_new_log_object(self):
19 elements = ['who', 'field', 'original_value', 'new_value']
20
21 for element in elements:
22 if not hasattr(self, element):
23 raise NotImplementedError(
24 f'Log does not have attribute {element}')
25
26 model = self.get_log_model()
27 new = model(**self.get_log_create_data())
28
29 return new
30
31 def get_log_model(self):
32 """
33 Get the log model to create with this class.
34 """
35 return TCMSLogModel
36
37 def get_log_create_data(self):
38 return {
39 'content_object': self.model,
40 'site_id': settings.SITE_ID,
41 'who': self.who,
42 'field': self.field,
43 'original_value': self.original_value,
44 'new_value': self.new_value,
45 }
46
47 def make(self, who, new_value, field=None, original_value=None):
48 """Create new log"""
49 self.who = who
50 self.field = field or ''
51 self.original_value = original_value or ''
52 self.new_value = new_value
53
54 model = self.get_new_log_object()
55 model.save()
56
57 def lookup_content_type(self):
58 return ContentType.objects.get_for_model(self.model)
59
60 def get_query_set(self):
61 ctype = self.lookup_content_type()
62 model = self.get_log_model()
63
64 qs = model.objects.filter(content_type=ctype,
65 object_pk=smart_unicode(self.model.pk),
66 site=settings.SITE_ID)
67 qs = qs.select_related('who')
68 return qs
69
70 def list(self):
71 """List the logs"""
72 return self.get_query_set().all()
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py
--- a/src/tcms/logs/views.py
+++ b/src/tcms/logs/views.py
@@ -5,7 +5,7 @@
from .models import TCMSLogModel
-from django.utils.encoding import smart_text as smart_unicode
+from django.utils.encoding import smart_str
class TCMSLog:
@@ -62,7 +62,7 @@
model = self.get_log_model()
qs = model.objects.filter(content_type=ctype,
- object_pk=smart_unicode(self.model.pk),
+ object_pk=smart_str(self.model.pk),
site=settings.SITE_ID)
qs = qs.select_related('who')
return qs
|
{"golden_diff": "diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py\n--- a/src/tcms/logs/views.py\n+++ b/src/tcms/logs/views.py\n@@ -5,7 +5,7 @@\n \n from .models import TCMSLogModel\n \n-from django.utils.encoding import smart_text as smart_unicode\n+from django.utils.encoding import smart_str\n \n \n class TCMSLog:\n@@ -62,7 +62,7 @@\n model = self.get_log_model()\n \n qs = model.objects.filter(content_type=ctype,\n- object_pk=smart_unicode(self.model.pk),\n+ object_pk=smart_str(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n", "issue": "Replace smart_text() with smart_str()\nTo suppress warning: `RemovedInDjango40Warning: smart_text() is deprecated in favor of smart_str()`\r\n\r\nRun command `runserver`, find out the warning messages from output.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import TCMSLogModel\n\nfrom django.utils.encoding import smart_text as smart_unicode\n\n\nclass TCMSLog:\n \"\"\"TCMS Log\"\"\"\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def get_new_log_object(self):\n elements = ['who', 'field', 'original_value', 'new_value']\n\n for element in elements:\n if not hasattr(self, element):\n raise NotImplementedError(\n f'Log does not have attribute {element}')\n\n model = self.get_log_model()\n new = model(**self.get_log_create_data())\n\n return new\n\n def get_log_model(self):\n \"\"\"\n Get the log model to create with this class.\n \"\"\"\n return TCMSLogModel\n\n def get_log_create_data(self):\n return {\n 'content_object': self.model,\n 'site_id': settings.SITE_ID,\n 'who': self.who,\n 'field': self.field,\n 'original_value': self.original_value,\n 'new_value': self.new_value,\n }\n\n def make(self, who, new_value, field=None, original_value=None):\n \"\"\"Create new log\"\"\"\n self.who = who\n self.field = field or ''\n self.original_value = original_value or ''\n self.new_value = new_value\n\n model = self.get_new_log_object()\n model.save()\n\n def lookup_content_type(self):\n return ContentType.objects.get_for_model(self.model)\n\n def get_query_set(self):\n ctype = self.lookup_content_type()\n model = self.get_log_model()\n\n qs = model.objects.filter(content_type=ctype,\n object_pk=smart_unicode(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n\n def list(self):\n \"\"\"List the logs\"\"\"\n return self.get_query_set().all()\n", "path": "src/tcms/logs/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import TCMSLogModel\n\nfrom django.utils.encoding import smart_str\n\n\nclass TCMSLog:\n \"\"\"TCMS Log\"\"\"\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def get_new_log_object(self):\n elements = ['who', 'field', 'original_value', 'new_value']\n\n for element in elements:\n if not hasattr(self, element):\n raise NotImplementedError(\n f'Log does not have attribute {element}')\n\n model = self.get_log_model()\n new = model(**self.get_log_create_data())\n\n return new\n\n def get_log_model(self):\n \"\"\"\n Get the log model to create with this class.\n \"\"\"\n return TCMSLogModel\n\n def get_log_create_data(self):\n return {\n 'content_object': self.model,\n 'site_id': settings.SITE_ID,\n 'who': self.who,\n 'field': self.field,\n 'original_value': 
self.original_value,\n 'new_value': self.new_value,\n }\n\n def make(self, who, new_value, field=None, original_value=None):\n \"\"\"Create new log\"\"\"\n self.who = who\n self.field = field or ''\n self.original_value = original_value or ''\n self.new_value = new_value\n\n model = self.get_new_log_object()\n model.save()\n\n def lookup_content_type(self):\n return ContentType.objects.get_for_model(self.model)\n\n def get_query_set(self):\n ctype = self.lookup_content_type()\n model = self.get_log_model()\n\n qs = model.objects.filter(content_type=ctype,\n object_pk=smart_str(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n\n def list(self):\n \"\"\"List the logs\"\"\"\n return self.get_query_set().all()\n", "path": "src/tcms/logs/views.py"}]}
| 874 | 156 |
gh_patches_debug_10819
|
rasdani/github-patches
|
git_diff
|
aws-powertools__powertools-lambda-python-921
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning for distutils in Python 3.10 (will be removed in Python 3.12)
<!--- Provide a general summary of the issue in the Title above -->
<!--- How has this issue affected you? What are you trying to accomplish? -->
**What were you trying to accomplish?**
Run tests utilizing aws-lambda-powertools.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
No DeprecationWarning to appear.
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
When running tests (specifically using `pytest`) that import `aws_lambda_powertools`, a `DeprecationWarning` appears.
```
.../aws_lambda_powertools/shared/functions.py:1: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives
from distutils.util import strtobool
```
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
The only function used out of distutils is `strtobool`, and it is only used in one place, so that logic can simply be copied into the `resolve_truthy_env_var_choice` function in `aws_lambda_powertools.shared.functions`.
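Editorial aside, not part of the original report: a minimal sketch of that drop-in replacement (the accepted spellings simply mirror `distutils.util.strtobool`, and the eventual patch in this entry does essentially the same thing):

```python
from typing import Optional


def strtobool(value: str) -> int:
    # Vendored copy of distutils.util.strtobool, so the deprecated import can go.
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"invalid truth value {value!r}")


def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:
    # Same behaviour as before, just without importing distutils.
    return choice if choice is not None else bool(strtobool(env))
```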
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1. Use python3.10
2. Activate a new venv (`python3.10 -m venv .venv` and `. .venv/bin/activate`)
3. `pip install aws-lambda-powertools pytest`
4. `echo "import aws_lambda_powertools" > reproduce.py`
5. `pytest reproduce.py`
## Environment
* **Powertools version used**: `1.22.0`
* **Packaging format (Layers, PyPi)**: PyPi
This warning appears in development workflows, but will be eventually breaking when Python 3.12 is released.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aws_lambda_powertools/shared/functions.py`
Content:
```
1 from distutils.util import strtobool
2 from typing import Any, Optional, Union
3
4
5 def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:
6 """Pick explicit choice over truthy env value, if available, otherwise return truthy env value
7
8 NOTE: Environment variable should be resolved by the caller.
9
10 Parameters
11 ----------
12 env : str
13 environment variable actual value
14 choice : bool
15 explicit choice
16
17 Returns
18 -------
19 choice : str
20 resolved choice as either bool or environment value
21 """
22 return choice if choice is not None else strtobool(env)
23
24
25 def resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]:
26 """Pick explicit choice over env, if available, otherwise return env value received
27
28 NOTE: Environment variable should be resolved by the caller.
29
30 Parameters
31 ----------
32 env : Any
33 environment variable actual value
34 choice : bool
35 explicit choice
36
37 Returns
38 -------
39 choice : str
40 resolved choice as either bool or environment value
41 """
42 return choice if choice is not None else env
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py
--- a/aws_lambda_powertools/shared/functions.py
+++ b/aws_lambda_powertools/shared/functions.py
@@ -1,7 +1,16 @@
-from distutils.util import strtobool
from typing import Any, Optional, Union
+def strtobool(value):
+ value = value.lower()
+ if value in ("y", "yes", "t", "true", "on", "1"):
+ return 1
+ elif value in ("n", "no", "f", "false", "off", "0"):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (value,))
+
+
def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:
"""Pick explicit choice over truthy env value, if available, otherwise return truthy env value
|
{"golden_diff": "diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py\n--- a/aws_lambda_powertools/shared/functions.py\n+++ b/aws_lambda_powertools/shared/functions.py\n@@ -1,7 +1,16 @@\n-from distutils.util import strtobool\n from typing import Any, Optional, Union\n \n \n+def strtobool(value):\n+ value = value.lower()\n+ if value in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n+ return 1\n+ elif value in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n+ return 0\n+ else:\n+ raise ValueError(\"invalid truth value %r\" % (value,))\n+\n+\n def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n", "issue": "DeprecationWarning for distutils in Python 3.10 (will be removed in Python 3.12)\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- How has this issue affected you? What are you trying to accomplish? -->\r\n\r\n**What were you trying to accomplish?**\r\n\r\nRun tests utilizing aws-lambda-powertools.\r\n\r\n## Expected Behavior\r\n<!--- If you're describing a bug, tell us what should happen -->\r\n<!--- If you're suggesting a change/improvement, tell us how it should work -->\r\n\r\nNo DeprecationWarning to appear.\r\n\r\n## Current Behavior\r\n<!--- If describing a bug, tell us what happens instead of the expected behavior -->\r\n<!--- If suggesting a change/improvement, explain the difference from current behavior -->\r\n\r\nWhen running tests (specifically using `pytest`) that import `aws_lamdba_powertools`, a `DeprecationWarning` appears.\r\n\r\n```\r\n.../aws_lambda_powertools/shared/functions.py:1: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives\r\n from distutils.util import strtobool\r\n```\r\n\r\n## Possible Solution\r\n<!--- Not obligatory, but suggest a fix/reason for the bug, -->\r\n<!--- or ideas how to implement the addition or change -->\r\n\r\nThe only function used out of distutils is `strtobool`, and it is only used in one place, so that logic can simply be copied into the `resolve_truthy_env_var_choice` function in `aws_lambda_powertools.shared.functions`.\r\n\r\n## Steps to Reproduce (for bugs)\r\n<!--- Provide a link to a live example, or an unambiguous set of steps to -->\r\n<!--- reproduce this bug. Include code to reproduce, if relevant -->\r\n1. Use python3.10\r\n2. Activate a new venv (`python3.10 -m venv .venv` and `. .venv/bin/activate`)\r\n3. `pip install aws-lambda-powertools pytest`\r\n4. `echo \"import aws_lambda_powertools\" > reproduce.py`\r\n5. 
`pytest reproduce.py`\r\n\r\n## Environment\r\n\r\n* **Powertools version used**: `1.22.0`\r\n* **Packaging format (Layers, PyPi)**: PyPi\r\n\r\nThis warning appears in development workflows, but will be eventually breaking when Python 3.12 is released.\n", "before_files": [{"content": "from distutils.util import strtobool\nfrom typing import Any, Optional, Union\n\n\ndef resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : str\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else strtobool(env)\n\n\ndef resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]:\n \"\"\"Pick explicit choice over env, if available, otherwise return env value received\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : Any\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else env\n", "path": "aws_lambda_powertools/shared/functions.py"}], "after_files": [{"content": "from typing import Any, Optional, Union\n\n\ndef strtobool(value):\n value = value.lower()\n if value in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif value in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(\"invalid truth value %r\" % (value,))\n\n\ndef resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : str\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else strtobool(env)\n\n\ndef resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]:\n \"\"\"Pick explicit choice over env, if available, otherwise return env value received\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : Any\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else env\n", "path": "aws_lambda_powertools/shared/functions.py"}]}
| 1,103 | 207 |
gh_patches_debug_61694
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-7762
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Locale searches aren't working
**Summary**
https://developer.mozilla.org/de/search?q=Zuweisungsoperator yields nothing.
But it should work, because of: https://developer.mozilla.org/api/v1/search?q=Zuweisungsoperator&locale=de
**Additional context**
_Is there anything else we should know?_
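Editorial aside, not part of the original report: the symptom (the API finds the page when `locale=de` is passed explicitly, while `/de/search` shows nothing) points at the server-side view calling the search API without forwarding the request's locale. A sketch of the forwarding call, mirroring the one-line patch shown further down and assuming kuma's module layout as listed in this entry:

```python
from kuma.api.v1.search import search as search_api


def search(request, *args, **kwargs):
    # Forward the page's locale (e.g. "de") so the API applies the same
    # filter as a direct /api/v1/search?q=...&locale=de request.
    response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)
    return response.data
```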
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/views.py`
Content:
```
1 from urllib.parse import parse_qs, urlencode
2
3 from django.conf import settings
4 from django.shortcuts import render
5 from django.urls import reverse_lazy
6 from django.views.decorators.cache import never_cache
7 from django.views.decorators.http import require_GET
8 from django.views.generic import RedirectView
9 from ratelimit.decorators import ratelimit
10
11 from kuma.api.v1.search import search as search_api
12 from kuma.core.decorators import shared_cache_control
13
14
15 # Since the search endpoint accepts user input (via query parameters) and its
16 # response is compressed, use rate limiting to mitigate the BREACH attack
17 # (see http://breachattack.com/). It still needs to allow a user to click
18 # the filter switches (bug 1426968).
19 # Alternate: forbid gzip by setting Content-Encoding: identity
20 @never_cache
21 @require_GET
22 @ratelimit(key="user_or_ip", rate="25/m", block=True)
23 def search(request, *args, **kwargs):
24 """
25 The search view.
26
27 --2021-- THIS VIEW IS A HACK! --2021--
28 This Django view exists to server-side render the search results page.
29 But we're moving the search result page to Yari and that one will use a XHR
30 request (to /api/v1/search) from a skeleton page (aka. SPA).
31 But as a way to get to that, we need to transition from the old to the new.
32 So, this page uses the Django view in kuma.api.v1.search.search, which
33 returns a special `JsonResponse` instance whose data we can pluck out
34 to our needs for this old view.
35 Once we've fully moved to the Yari (static + XHR to v1 API) site-search,
36 we can comfortably delete this view.
37 """
38 # The underlying v1 API supports searching without a 'q' but the web
39 # UI doesn't. For example, the search input field requires a value.
40 # So we match that here too.
41 if not request.GET.get("q", "").strip():
42 status = 400
43 context = {"results": {}}
44 else:
45 # TODO consider, if the current locale is *not* en-US, that we force
46 # it to do a search in both locales.
47 # This might come in handy for people searching in a locale where
48 # there's very little results but they'd be happy to get the en-US ones.
49 response = search_api(request, *args, **kwargs)
50 results = response.data
51
52 error = None
53 status = response.status_code
54
55 # Determine if there were validation errors
56 if status == 400:
57 error = ""
58 for key, messages in results["errors"].items():
59 for message in messages:
60 error += f"{key}: {message['message']}\n"
61 else:
62 # Have to rearrange the 'results' in a way the old search expects it.
63 # ...which is as follows:
64 # - `count`: integer number of matched documents
65 # - `previous`: a URL or empty string
66 # - `next`: a URL or empty string
67 # - `query`: string
68 # - `start`: pagination number
69 # - `end`: pagination number
70 # - `documents`:
71 # - `title`
72 # - `locale`
73 # - `slug`
74 # - `excerpt`: string of safe HTML
75 next_url = ""
76 previous_url = ""
77 page = results["metadata"]["page"]
78 size = results["metadata"]["size"]
79 count = results["metadata"]["total"]["value"]
80 query_string = request.META.get("QUERY_STRING")
81 query_string_parsed = parse_qs(query_string)
82 if (page + 1) * size < count:
83 query_string_parsed["page"] = f"{page + 1}"
84 next_url = f"?{urlencode(query_string_parsed, True)}"
85 if page > 1:
86 if page == 2:
87 del query_string_parsed["page"]
88 else:
89 query_string_parsed["page"] = f"{page - 1}"
90 previous_url = f"?{urlencode(query_string_parsed, True)}"
91
92 results = {
93 "count": count,
94 "next": next_url,
95 "previous": previous_url,
96 "query": request.GET.get("q"),
97 "start": (page - 1) * size + 1,
98 "end": page * size,
99 "documents": [
100 {
101 "title": x["title"],
102 "slug": x["slug"],
103 "locale": x["locale"],
104 "excerpt": "<br>".join(x["highlight"].get("body", [])),
105 }
106 for x in results["documents"]
107 ],
108 }
109
110 context = {"results": {"results": None if error else results, "error": error}}
111 return render(request, "search/react.html", context, status=status)
112
113
114 class SearchRedirectView(RedirectView):
115 permanent = True
116
117 def get_redirect_url(self, *args, **kwargs):
118 query_string = self.request.META.get("QUERY_STRING")
119 url = reverse_lazy("api.v1.search")
120 qs = parse_qs(query_string)
121 # If you used `/en-Us/search.json` you can skip the `?locale=`
122 # because the default locale in `/api/v1/search` is `en-US`.
123 if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():
124 qs["locale"] = self.request.LANGUAGE_CODE
125 if qs:
126 url += "?" + urlencode(qs, True)
127 return url
128
129
130 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
131 def plugin(request):
132 """Render an OpenSearch Plugin."""
133 return render(
134 request,
135 "search/plugin.html",
136 {"locale": request.LANGUAGE_CODE},
137 content_type="application/opensearchdescription+xml",
138 )
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -46,7 +46,7 @@
# it to do a search in both locales.
# This might come in handy for people searching in a locale where
# there's very little results but they'd be happy to get the en-US ones.
- response = search_api(request, *args, **kwargs)
+ response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)
results = response.data
error = None
|
{"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -46,7 +46,7 @@\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n- response = search_api(request, *args, **kwargs)\n+ response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n \n error = None\n", "issue": "Locale searches aren't working\n**Summary**\r\nhttps://developer.mozilla.org/de/search?q=Zuweisungsoperator yields nothing. \r\nBut it should work, because of: https://developer.mozilla.org/api/v1/search?q=Zuweisungsoperator&locale=de\r\n\r\n\r\n**Additional context**\r\n_Is there anything else we should know?_\r\n\n", "before_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. 
For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}], "after_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import 
RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", 
[])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]}
| 1,928 | 134 |
gh_patches_debug_13051
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-7871
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
current_user.is_anonymous doesn't care if the context has a user
## CKAN version
2.10
## Describe the bug
While looking into why https://github.com/ckan/ckan/pull/7266 started failing on unmodified ckan, I noticed that deciphering anonymous users has changed. On 2.9 `restrict_anon` works as follows
https://github.com/ckan/ckan/blob/c4e2818818e08e60bb69d64229f8dbba531f8439/ckan/logic/auth/__init__.py#L51-L55
where `authz.auth_is_anon_user` checks if the context has an user in it. On 2.10 the functionality has changed:
https://github.com/ckan/ckan/blob/d46613e346f9dc551aedb54c8c24baad919f78c1/ckan/logic/auth/__init__.py#L93-L97
which does not check the context at all, and the password reset started failing on 2.10. Should the `is_anonymous` check look for a user in the context, or are we just relying on what flask-login says about the user?
--- END ISSUE ---
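For illustration, here is a minimal standalone sketch of the behavioural difference described above. The function names and the sample context are placeholders, not the actual CKAN helpers:
```python
# Sketch only: a context-based anonymity check (2.9-style behaviour) next to
# a flask-login based one (2.10-style behaviour).

def restrict_anon_from_context(context):
    # Anonymous only if the auth context carries no user name.
    if not context.get('user'):
        return {'success': False}
    return {'success': True}


def restrict_anon_from_flask_login(current_user):
    # Anonymous whenever flask-login's user object says so, regardless of
    # what the auth context contains.
    if current_user.is_anonymous:
        return {'success': False}
    return {'success': True}


# A request that puts a user into the context without logging in through
# flask-login passes the first check but would fail the second one.
print(restrict_anon_from_context({'user': 'reset-token-user'}))  # {'success': True}
```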
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/logic/auth/__init__.py`
Content:
```
1 # encoding: utf-8
2
3 '''
4 Helper functions to be used in the auth check functions
5 '''
6
7 from typing import Any, Optional, TYPE_CHECKING, overload
8 from typing_extensions import Literal
9
10 import ckan.logic as logic
11 from ckan.types import Context, AuthResult, DataDict
12 from ckan.common import current_user
13
14 if TYPE_CHECKING:
15 import ckan.model as model_
16
17
18 @overload
19 def _get_object(context: Context,
20 data_dict: Optional[DataDict], name: str,
21 class_name: Literal['Package']) -> 'model_.Package':
22 ...
23
24
25 @overload
26 def _get_object(context: Context,
27 data_dict: Optional[DataDict], name: str,
28 class_name: Literal['Resource']) -> 'model_.Resource':
29 ...
30
31
32 @overload
33 def _get_object(context: Context,
34 data_dict: Optional[DataDict], name: str,
35 class_name: Literal['Group']) -> 'model_.Group':
36 ...
37
38
39 @overload
40 def _get_object(context: Context,
41 data_dict: Optional[DataDict], name: str,
42 class_name: Literal['User']) -> 'model_.User':
43 ...
44
45
46 def _get_object(context: Context,
47 data_dict: Optional[DataDict], name: str,
48 class_name: str) -> Any:
49 # return the named item from model.class_name
50 model = context['model']
51 if not data_dict:
52 data_dict = {}
53 id = data_dict.get('id', None)
54 if not id:
55 raise logic.ValidationError({
56 "message": 'Missing id, can not get {0} object'.format(
57 class_name)})
58 obj = getattr(model, class_name).get(id)
59 if not obj:
60 raise logic.NotFound
61 return obj
62
63
64 def get_package_object(
65 context: Context,
66 data_dict: Optional[DataDict] = None) -> 'model_.Package':
67 return _get_object(context, data_dict, 'package', 'Package')
68
69
70 def get_resource_object(
71 context: Context,
72 data_dict: Optional[DataDict] = None) -> 'model_.Resource':
73 return _get_object(context, data_dict, 'resource', 'Resource')
74
75
76 def get_group_object(
77 context: Context,
78 data_dict: Optional[DataDict] = None) -> 'model_.Group':
79 return _get_object(context, data_dict, 'group', 'Group')
80
81
82 def get_user_object(
83 context: Context,
84 data_dict: Optional[DataDict] = None) -> 'model_.User':
85 return _get_object(context, data_dict, 'user_obj', 'User')
86
87
88 def restrict_anon(context: Context) -> AuthResult:
89 if current_user.is_anonymous:
90 return {'success': False}
91 else:
92 return {'success': True}
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckan/logic/auth/__init__.py b/ckan/logic/auth/__init__.py
--- a/ckan/logic/auth/__init__.py
+++ b/ckan/logic/auth/__init__.py
@@ -8,8 +8,8 @@
from typing_extensions import Literal
import ckan.logic as logic
+import ckan.authz as authz
from ckan.types import Context, AuthResult, DataDict
-from ckan.common import current_user
if TYPE_CHECKING:
import ckan.model as model_
@@ -86,7 +86,7 @@
def restrict_anon(context: Context) -> AuthResult:
- if current_user.is_anonymous:
+ if authz.auth_is_anon_user(context):
return {'success': False}
else:
return {'success': True}
|
{"golden_diff": "diff --git a/ckan/logic/auth/__init__.py b/ckan/logic/auth/__init__.py\n--- a/ckan/logic/auth/__init__.py\n+++ b/ckan/logic/auth/__init__.py\n@@ -8,8 +8,8 @@\n from typing_extensions import Literal\n \n import ckan.logic as logic\n+import ckan.authz as authz\n from ckan.types import Context, AuthResult, DataDict\n-from ckan.common import current_user\n \n if TYPE_CHECKING:\n import ckan.model as model_\n@@ -86,7 +86,7 @@\n \n \n def restrict_anon(context: Context) -> AuthResult:\n- if current_user.is_anonymous:\n+ if authz.auth_is_anon_user(context):\n return {'success': False}\n else:\n return {'success': True}\n", "issue": "current_user.is_anonymous doesn't care if context has an user\n## CKAN version\r\n2.10\r\n\r\n## Describe the bug\r\nWhile looking into why https://github.com/ckan/ckan/pull/7266 started failing on unmodified ckan, I noticed that deciphering anonymous users has changed. On 2.9 `restrict_anon` works as follows \r\nhttps://github.com/ckan/ckan/blob/c4e2818818e08e60bb69d64229f8dbba531f8439/ckan/logic/auth/__init__.py#L51-L55\r\n\r\nwhere `authz.auth_is_anon_user` checks if the context has an user in it. On 2.10 the functionality has changed:\r\n\r\nhttps://github.com/ckan/ckan/blob/d46613e346f9dc551aedb54c8c24baad919f78c1/ckan/logic/auth/__init__.py#L93-L97\r\n\r\nwhich does not check for context at all and the password reset started failing on 2.10. Should the `is_anonymous` check for user in context or are we just relying on what flask login says about the user? \r\n\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\n'''\nHelper functions to be used in the auth check functions\n'''\n\nfrom typing import Any, Optional, TYPE_CHECKING, overload\nfrom typing_extensions import Literal\n\nimport ckan.logic as logic\nfrom ckan.types import Context, AuthResult, DataDict\nfrom ckan.common import current_user\n\nif TYPE_CHECKING:\n import ckan.model as model_\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Package']) -> 'model_.Package':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Resource']) -> 'model_.Resource':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Group']) -> 'model_.Group':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['User']) -> 'model_.User':\n ...\n\n\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: str) -> Any:\n # return the named item from model.class_name\n model = context['model']\n if not data_dict:\n data_dict = {}\n id = data_dict.get('id', None)\n if not id:\n raise logic.ValidationError({\n \"message\": 'Missing id, can not get {0} object'.format(\n class_name)})\n obj = getattr(model, class_name).get(id)\n if not obj:\n raise logic.NotFound\n return obj\n\n\ndef get_package_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Package':\n return _get_object(context, data_dict, 'package', 'Package')\n\n\ndef get_resource_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Resource':\n return _get_object(context, data_dict, 'resource', 'Resource')\n\n\ndef get_group_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Group':\n return _get_object(context, data_dict, 'group', 'Group')\n\n\ndef get_user_object(\n 
context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.User':\n return _get_object(context, data_dict, 'user_obj', 'User')\n\n\ndef restrict_anon(context: Context) -> AuthResult:\n if current_user.is_anonymous:\n return {'success': False}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/__init__.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''\nHelper functions to be used in the auth check functions\n'''\n\nfrom typing import Any, Optional, TYPE_CHECKING, overload\nfrom typing_extensions import Literal\n\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.types import Context, AuthResult, DataDict\n\nif TYPE_CHECKING:\n import ckan.model as model_\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Package']) -> 'model_.Package':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Resource']) -> 'model_.Resource':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Group']) -> 'model_.Group':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['User']) -> 'model_.User':\n ...\n\n\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: str) -> Any:\n # return the named item from model.class_name\n model = context['model']\n if not data_dict:\n data_dict = {}\n id = data_dict.get('id', None)\n if not id:\n raise logic.ValidationError({\n \"message\": 'Missing id, can not get {0} object'.format(\n class_name)})\n obj = getattr(model, class_name).get(id)\n if not obj:\n raise logic.NotFound\n return obj\n\n\ndef get_package_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Package':\n return _get_object(context, data_dict, 'package', 'Package')\n\n\ndef get_resource_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Resource':\n return _get_object(context, data_dict, 'resource', 'Resource')\n\n\ndef get_group_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Group':\n return _get_object(context, data_dict, 'group', 'Group')\n\n\ndef get_user_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.User':\n return _get_object(context, data_dict, 'user_obj', 'User')\n\n\ndef restrict_anon(context: Context) -> AuthResult:\n if authz.auth_is_anon_user(context):\n return {'success': False}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/__init__.py"}]}
| 1,343 | 182 |
gh_patches_debug_16492
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-4174
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PubSub: Default thread-based subscriber Policy's on_response() implementation degenerates into series of synchronous calls because of debug statement
https://github.com/GoogleCloudPlatform/google-cloud-python/blob/4e17158c0f14e0e91cbeb74c3e8483c5a231e9ac/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py#L147
While testing out the new PubSub API (v 0.28.3), I noticed that the default thread-based `Policy`, which uses a `ThreadPoolExecutor(max_workers=10)`, seemed to be executing the subscriber callbacks sequentially instead of asynchronously. Upon introspection of the source, it looks like the above-referenced code:
```
...
future = self._executor.submit(self._callback, message)
logger.debug('Result: %s' % future.result())
```
will block until `self._callback` completes, because of the call to `future.result()`. Or am I losing my mind? Thanks!
--- END ISSUE ---
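The stall is easy to reproduce outside Pub/Sub. A small self-contained sketch (timings are approximate and the callback is a placeholder) shows how waiting on `result()` serialises the work, while logging from a done-callback keeps it concurrent:
```python
import time
from concurrent.futures import ThreadPoolExecutor


def slow_callback(message):
    time.sleep(1)
    return message


executor = ThreadPoolExecutor(max_workers=10)

start = time.time()
for msg in range(3):
    future = executor.submit(slow_callback, msg)
    # Waiting on result() right away blocks the loop, so the three
    # submissions run one after another (roughly 3 seconds total).
    print('Result: %s' % future.result())
print('blocking loop took %.1fs' % (time.time() - start))

start = time.time()
for msg in range(3):
    future = executor.submit(slow_callback, msg)
    # Logging from a done-callback leaves the submissions concurrent
    # (roughly 1 second for all three).
    future.add_done_callback(lambda f: print('Result: %s' % f.result()))
executor.shutdown(wait=True)
print('non-blocking loop took %.1fs' % (time.time() - start))
```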
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py`
Content:
```
1 # Copyright 2017, Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 from concurrent import futures
18 from queue import Queue
19 import logging
20 import threading
21
22 import grpc
23
24 from google.cloud.pubsub_v1 import types
25 from google.cloud.pubsub_v1.subscriber import _helper_threads
26 from google.cloud.pubsub_v1.subscriber.policy import base
27 from google.cloud.pubsub_v1.subscriber.message import Message
28
29
30 logger = logging.getLogger(__name__)
31
32
33 class Policy(base.BasePolicy):
34 """A consumer class based on :class:`threading.Thread`.
35
36 This consumer handles the connection to the Pub/Sub service and all of
37 the concurrency needs.
38 """
39 def __init__(self, client, subscription, flow_control=types.FlowControl(),
40 executor=None, queue=None):
41 """Instantiate the policy.
42
43 Args:
44 client (~.pubsub_v1.subscriber.client): The subscriber client used
45 to create this instance.
46 subscription (str): The name of the subscription. The canonical
47 format for this is
48 ``projects/{project}/subscriptions/{subscription}``.
49 flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow
50 control settings.
51 executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A
52 ThreadPoolExecutor instance, or anything duck-type compatible
53 with it.
54 queue (~queue.Queue): (Optional.) A Queue instance, appropriate
55 for crossing the concurrency boundary implemented by
56 ``executor``.
57 """
58 # Default the callback to a no-op; it is provided by `.open`.
59 self._callback = lambda message: None
60
61 # Create a queue for keeping track of shared state.
62 if queue is None:
63 queue = Queue()
64 self._request_queue = queue
65
66 # Call the superclass constructor.
67 super(Policy, self).__init__(
68 client=client,
69 flow_control=flow_control,
70 subscription=subscription,
71 )
72
73 # Also maintain a request queue and an executor.
74 logger.debug('Creating callback requests thread (not starting).')
75 if executor is None:
76 executor = futures.ThreadPoolExecutor(max_workers=10)
77 self._executor = executor
78 self._callback_requests = _helper_threads.QueueCallbackThread(
79 self._request_queue,
80 self.on_callback_request,
81 )
82
83 def close(self):
84 """Close the existing connection."""
85 # Close the main subscription connection.
86 self._consumer.helper_threads.stop('callback requests worker')
87 self._consumer.stop_consuming()
88
89 def open(self, callback):
90 """Open a streaming pull connection and begin receiving messages.
91
92 For each message received, the ``callback`` function is fired with
93 a :class:`~.pubsub_v1.subscriber.message.Message` as its only
94 argument.
95
96 Args:
97 callback (Callable): The callback function.
98 """
99 # Start the thread to pass the requests.
100 logger.debug('Starting callback requests worker.')
101 self._callback = callback
102 self._consumer.helper_threads.start(
103 'callback requests worker',
104 self._request_queue,
105 self._callback_requests,
106 )
107
108 # Actually start consuming messages.
109 self._consumer.start_consuming()
110
111 # Spawn a helper thread that maintains all of the leases for
112 # this policy.
113 logger.debug('Spawning lease maintenance worker.')
114 self._leaser = threading.Thread(target=self.maintain_leases)
115 self._leaser.daemon = True
116 self._leaser.start()
117
118 def on_callback_request(self, callback_request):
119 """Map the callback request to the appropriate GRPC request."""
120 action, kwargs = callback_request[0], callback_request[1]
121 getattr(self, action)(**kwargs)
122
123 def on_exception(self, exception):
124 """Bubble the exception.
125
126 This will cause the stream to exit loudly.
127 """
128 # If this is DEADLINE_EXCEEDED, then we want to retry.
129 # That entails just returning None.
130 deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED
131 if getattr(exception, 'code', lambda: None)() == deadline_exceeded:
132 return
133
134 # Raise any other exception.
135 raise exception
136
137 def on_response(self, response):
138 """Process all received Pub/Sub messages.
139
140 For each message, schedule a callback with the executor.
141 """
142 for msg in response.received_messages:
143 logger.debug('New message received from Pub/Sub: %r', msg)
144 logger.debug(self._callback)
145 message = Message(msg.message, msg.ack_id, self._request_queue)
146 future = self._executor.submit(self._callback, message)
147 logger.debug('Result: %s' % future.result())
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py
--- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py
+++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py
@@ -30,6 +30,11 @@
logger = logging.getLogger(__name__)
+def _callback_completed(future):
+ """Simple callback that just logs a `Future`'s result."""
+ logger.debug('Result: %s', future.result())
+
+
class Policy(base.BasePolicy):
"""A consumer class based on :class:`threading.Thread`.
@@ -144,4 +149,4 @@
logger.debug(self._callback)
message = Message(msg.message, msg.ack_id, self._request_queue)
future = self._executor.submit(self._callback, message)
- logger.debug('Result: %s' % future.result())
+ future.add_done_callback(_callback_completed)
|
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n--- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n+++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n@@ -30,6 +30,11 @@\n logger = logging.getLogger(__name__)\n \n \n+def _callback_completed(future):\n+ \"\"\"Simple callback that just logs a `Future`'s result.\"\"\"\n+ logger.debug('Result: %s', future.result())\n+\n+\n class Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n \n@@ -144,4 +149,4 @@\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n- logger.debug('Result: %s' % future.result())\n+ future.add_done_callback(_callback_completed)\n", "issue": "PubSub: Default thread-based subscriber Policy's on_response() implementation degenerates into series of synchronous calls because of debug statement\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/4e17158c0f14e0e91cbeb74c3e8483c5a231e9ac/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py#L147\r\n\r\nWhile testing out the new PubSub API (v 0.28.3), I noticed that the default thread-based `Policy`, which uses a `ThreadPoolExecutor(max_workers=10)` seemed to be executing the subscriber callbacks sequentially, instead of asynchronously. Upon introspection of the source, it looks like the above referenced code:\r\n```\r\n...\r\nfuture = self._executor.submit(self._callback, message)\r\nlogger.debug('Result: %s' % future.result())\r\n```\r\nwill block until `self._callback` completes, because of the call to `future.result()`. Or am I losing my mind? Thanks!\n", "before_files": [{"content": "# Copyright 2017, Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom concurrent import futures\nfrom queue import Queue\nimport logging\nimport threading\n\nimport grpc\n\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.subscriber import _helper_threads\nfrom google.cloud.pubsub_v1.subscriber.policy import base\nfrom google.cloud.pubsub_v1.subscriber.message import Message\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n\n This consumer handles the connection to the Pub/Sub service and all of\n the concurrency needs.\n \"\"\"\n def __init__(self, client, subscription, flow_control=types.FlowControl(),\n executor=None, queue=None):\n \"\"\"Instantiate the policy.\n\n Args:\n client (~.pubsub_v1.subscriber.client): The subscriber client used\n to create this instance.\n subscription (str): The name of the subscription. 
The canonical\n format for this is\n ``projects/{project}/subscriptions/{subscription}``.\n flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow\n control settings.\n executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A\n ThreadPoolExecutor instance, or anything duck-type compatible\n with it.\n queue (~queue.Queue): (Optional.) A Queue instance, appropriate\n for crossing the concurrency boundary implemented by\n ``executor``.\n \"\"\"\n # Default the callback to a no-op; it is provided by `.open`.\n self._callback = lambda message: None\n\n # Create a queue for keeping track of shared state.\n if queue is None:\n queue = Queue()\n self._request_queue = queue\n\n # Call the superclass constructor.\n super(Policy, self).__init__(\n client=client,\n flow_control=flow_control,\n subscription=subscription,\n )\n\n # Also maintain a request queue and an executor.\n logger.debug('Creating callback requests thread (not starting).')\n if executor is None:\n executor = futures.ThreadPoolExecutor(max_workers=10)\n self._executor = executor\n self._callback_requests = _helper_threads.QueueCallbackThread(\n self._request_queue,\n self.on_callback_request,\n )\n\n def close(self):\n \"\"\"Close the existing connection.\"\"\"\n # Close the main subscription connection.\n self._consumer.helper_threads.stop('callback requests worker')\n self._consumer.stop_consuming()\n\n def open(self, callback):\n \"\"\"Open a streaming pull connection and begin receiving messages.\n\n For each message received, the ``callback`` function is fired with\n a :class:`~.pubsub_v1.subscriber.message.Message` as its only\n argument.\n\n Args:\n callback (Callable): The callback function.\n \"\"\"\n # Start the thread to pass the requests.\n logger.debug('Starting callback requests worker.')\n self._callback = callback\n self._consumer.helper_threads.start(\n 'callback requests worker',\n self._request_queue,\n self._callback_requests,\n )\n\n # Actually start consuming messages.\n self._consumer.start_consuming()\n\n # Spawn a helper thread that maintains all of the leases for\n # this policy.\n logger.debug('Spawning lease maintenance worker.')\n self._leaser = threading.Thread(target=self.maintain_leases)\n self._leaser.daemon = True\n self._leaser.start()\n\n def on_callback_request(self, callback_request):\n \"\"\"Map the callback request to the appropriate GRPC request.\"\"\"\n action, kwargs = callback_request[0], callback_request[1]\n getattr(self, action)(**kwargs)\n\n def on_exception(self, exception):\n \"\"\"Bubble the exception.\n\n This will cause the stream to exit loudly.\n \"\"\"\n # If this is DEADLINE_EXCEEDED, then we want to retry.\n # That entails just returning None.\n deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED\n if getattr(exception, 'code', lambda: None)() == deadline_exceeded:\n return\n\n # Raise any other exception.\n raise exception\n\n def on_response(self, response):\n \"\"\"Process all received Pub/Sub messages.\n\n For each message, schedule a callback with the executor.\n \"\"\"\n for msg in response.received_messages:\n logger.debug('New message received from Pub/Sub: %r', msg)\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n logger.debug('Result: %s' % future.result())\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py"}], "after_files": [{"content": "# Copyright 2017, Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom concurrent import futures\nfrom queue import Queue\nimport logging\nimport threading\n\nimport grpc\n\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.subscriber import _helper_threads\nfrom google.cloud.pubsub_v1.subscriber.policy import base\nfrom google.cloud.pubsub_v1.subscriber.message import Message\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _callback_completed(future):\n \"\"\"Simple callback that just logs a `Future`'s result.\"\"\"\n logger.debug('Result: %s', future.result())\n\n\nclass Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n\n This consumer handles the connection to the Pub/Sub service and all of\n the concurrency needs.\n \"\"\"\n def __init__(self, client, subscription, flow_control=types.FlowControl(),\n executor=None, queue=None):\n \"\"\"Instantiate the policy.\n\n Args:\n client (~.pubsub_v1.subscriber.client): The subscriber client used\n to create this instance.\n subscription (str): The name of the subscription. The canonical\n format for this is\n ``projects/{project}/subscriptions/{subscription}``.\n flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow\n control settings.\n executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A\n ThreadPoolExecutor instance, or anything duck-type compatible\n with it.\n queue (~queue.Queue): (Optional.) 
A Queue instance, appropriate\n for crossing the concurrency boundary implemented by\n ``executor``.\n \"\"\"\n # Default the callback to a no-op; it is provided by `.open`.\n self._callback = lambda message: None\n\n # Create a queue for keeping track of shared state.\n if queue is None:\n queue = Queue()\n self._request_queue = queue\n\n # Call the superclass constructor.\n super(Policy, self).__init__(\n client=client,\n flow_control=flow_control,\n subscription=subscription,\n )\n\n # Also maintain a request queue and an executor.\n logger.debug('Creating callback requests thread (not starting).')\n if executor is None:\n executor = futures.ThreadPoolExecutor(max_workers=10)\n self._executor = executor\n self._callback_requests = _helper_threads.QueueCallbackThread(\n self._request_queue,\n self.on_callback_request,\n )\n\n def close(self):\n \"\"\"Close the existing connection.\"\"\"\n # Close the main subscription connection.\n self._consumer.helper_threads.stop('callback requests worker')\n self._consumer.stop_consuming()\n\n def open(self, callback):\n \"\"\"Open a streaming pull connection and begin receiving messages.\n\n For each message received, the ``callback`` function is fired with\n a :class:`~.pubsub_v1.subscriber.message.Message` as its only\n argument.\n\n Args:\n callback (Callable): The callback function.\n \"\"\"\n # Start the thread to pass the requests.\n logger.debug('Starting callback requests worker.')\n self._callback = callback\n self._consumer.helper_threads.start(\n 'callback requests worker',\n self._request_queue,\n self._callback_requests,\n )\n\n # Actually start consuming messages.\n self._consumer.start_consuming()\n\n # Spawn a helper thread that maintains all of the leases for\n # this policy.\n logger.debug('Spawning lease maintenance worker.')\n self._leaser = threading.Thread(target=self.maintain_leases)\n self._leaser.daemon = True\n self._leaser.start()\n\n def on_callback_request(self, callback_request):\n \"\"\"Map the callback request to the appropriate GRPC request.\"\"\"\n action, kwargs = callback_request[0], callback_request[1]\n getattr(self, action)(**kwargs)\n\n def on_exception(self, exception):\n \"\"\"Bubble the exception.\n\n This will cause the stream to exit loudly.\n \"\"\"\n # If this is DEADLINE_EXCEEDED, then we want to retry.\n # That entails just returning None.\n deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED\n if getattr(exception, 'code', lambda: None)() == deadline_exceeded:\n return\n\n # Raise any other exception.\n raise exception\n\n def on_response(self, response):\n \"\"\"Process all received Pub/Sub messages.\n\n For each message, schedule a callback with the executor.\n \"\"\"\n for msg in response.received_messages:\n logger.debug('New message received from Pub/Sub: %r', msg)\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n future.add_done_callback(_callback_completed)\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py"}]}
| 1,945 | 224 |
gh_patches_debug_18077
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-3258
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[qt-journalist-updater] Add a time estimate to the update progress bar
## Description
[See epic #3076 for primary feature, PRs for this ticket should go into `qt-journalist-updater`]
We should add a reasonable time estimate to the update progress bar as the upgrade progresses
## User Stories
As a journalist user, I want an estimate of how long it will take until updates are finished so that I can plan.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `journalist_gui/journalist_gui/strings.py`
Content:
```
1 window_title = 'SecureDrop Updater'
2 update_in_progress = ("SecureDrop workstation updates are available! "
3 "You should install them now. If you don\'t want to, "
4 "you can install them the next time you reboot.")
5 fetching_update = 'Fetching and verifying latest update...'
6 updating_tails_env = 'Configuring local Tails environment...'
7 finished = 'Update successfully completed!'
8 finished_dialog_message = 'Updates completed successfully. Click OK to close.'
9 finished_dialog_title = 'SecureDrop Workstation is up to date!'
10 update_failed_dialog_title = 'Error Updating SecureDrop Workstation'
11 update_failed_generic_reason = ("Update failed. "
12 "Please contact your SecureDrop "
13 "administrator.")
14 update_failed_sig_failure = ("WARNING: Signature verification failed. "
15 "Contact your SecureDrop administrator "
16 "immediately.")
17 tailsconfig_failed_sudo_password = 'Sudo password incorrect'
18 tailsconfig_failed_generic_reason = ("Tails workstation configuration failed. "
19 "Contact your administrator.")
20 install_update_button = 'Install Now'
21 install_later_button = 'Install Later'
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/journalist_gui/journalist_gui/strings.py b/journalist_gui/journalist_gui/strings.py
--- a/journalist_gui/journalist_gui/strings.py
+++ b/journalist_gui/journalist_gui/strings.py
@@ -2,8 +2,10 @@
update_in_progress = ("SecureDrop workstation updates are available! "
"You should install them now. If you don\'t want to, "
"you can install them the next time you reboot.")
-fetching_update = 'Fetching and verifying latest update...'
-updating_tails_env = 'Configuring local Tails environment...'
+fetching_update = ('Fetching and verifying latest update...'
+ ' (4 mins remaining)')
+updating_tails_env = ('Configuring local Tails environment...'
+ ' (1 min remaining)')
finished = 'Update successfully completed!'
finished_dialog_message = 'Updates completed successfully. Click OK to close.'
finished_dialog_title = 'SecureDrop Workstation is up to date!'
|
{"golden_diff": "diff --git a/journalist_gui/journalist_gui/strings.py b/journalist_gui/journalist_gui/strings.py\n--- a/journalist_gui/journalist_gui/strings.py\n+++ b/journalist_gui/journalist_gui/strings.py\n@@ -2,8 +2,10 @@\n update_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\n-fetching_update = 'Fetching and verifying latest update...'\n-updating_tails_env = 'Configuring local Tails environment...'\n+fetching_update = ('Fetching and verifying latest update...'\n+ ' (4 mins remaining)')\n+updating_tails_env = ('Configuring local Tails environment...'\n+ ' (1 min remaining)')\n finished = 'Update successfully completed!'\n finished_dialog_message = 'Updates completed successfully. Click OK to close.'\n finished_dialog_title = 'SecureDrop Workstation is up to date!'\n", "issue": "[qt-journalist-updater] Add a time estimate to the update progress bar\n## Description\r\n\r\n[See epic #3076 for primary feature, PRs for this ticket should go into `qt-journalist-updater`]\r\n\r\nWe should add a reasonable time estimate to the update progress bar as the upgrade progresses\r\n\r\n## User Stories\r\n\r\nAs a journalist user, I want an estimate of how long it will take until updates are finished so that I can plan. \n", "before_files": [{"content": "window_title = 'SecureDrop Updater'\nupdate_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\nfetching_update = 'Fetching and verifying latest update...'\nupdating_tails_env = 'Configuring local Tails environment...'\nfinished = 'Update successfully completed!'\nfinished_dialog_message = 'Updates completed successfully. Click OK to close.'\nfinished_dialog_title = 'SecureDrop Workstation is up to date!'\nupdate_failed_dialog_title = 'Error Updating SecureDrop Workstation'\nupdate_failed_generic_reason = (\"Update failed. \"\n \"Please contact your SecureDrop \"\n \"administrator.\")\nupdate_failed_sig_failure = (\"WARNING: Signature verification failed. \"\n \"Contact your SecureDrop administrator \"\n \"immediately.\")\ntailsconfig_failed_sudo_password = 'Sudo password incorrect'\ntailsconfig_failed_generic_reason = (\"Tails workstation configuration failed. \"\n \"Contact your administrator.\")\ninstall_update_button = 'Install Now'\ninstall_later_button = 'Install Later'\n", "path": "journalist_gui/journalist_gui/strings.py"}], "after_files": [{"content": "window_title = 'SecureDrop Updater'\nupdate_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\nfetching_update = ('Fetching and verifying latest update...'\n ' (4 mins remaining)')\nupdating_tails_env = ('Configuring local Tails environment...'\n ' (1 min remaining)')\nfinished = 'Update successfully completed!'\nfinished_dialog_message = 'Updates completed successfully. Click OK to close.'\nfinished_dialog_title = 'SecureDrop Workstation is up to date!'\nupdate_failed_dialog_title = 'Error Updating SecureDrop Workstation'\nupdate_failed_generic_reason = (\"Update failed. \"\n \"Please contact your SecureDrop \"\n \"administrator.\")\nupdate_failed_sig_failure = (\"WARNING: Signature verification failed. 
\"\n \"Contact your SecureDrop administrator \"\n \"immediately.\")\ntailsconfig_failed_sudo_password = 'Sudo password incorrect'\ntailsconfig_failed_generic_reason = (\"Tails workstation configuration failed. \"\n \"Contact your administrator.\")\ninstall_update_button = 'Install Now'\ninstall_later_button = 'Install Later'\n", "path": "journalist_gui/journalist_gui/strings.py"}]}
| 616 | 211 |
gh_patches_debug_10610
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-465
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import environment bokeh version in HTML
As described in #425, having the environment Bokeh version NOT match the HTML template Bokeh version can cause problems.
I wrote some tests in #429 that will make sure this is always the case in the future, but we could be even smarter about this if we programmatically set the Bokeh version using a Django variable. We could even include it in `context_processors` so it happens behind the scenes.
--- END ISSUE ---
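One way to keep the two in sync, assuming Bokeh is importable in the web application's environment, is to surface the installed version through a context processor. This is only a sketch of the idea, not the project's actual code:
```python
# Illustrative sketch: a Django context processor exposing the environment's
# Bokeh version to every template as ``bokeh_version``.
import bokeh


def bokeh_version_context(request):
    # Templates can then reference {{ bokeh_version }} when building the
    # Bokeh CDN URLs instead of hard-coding a version string.
    return {'bokeh_version': bokeh.__version__}
```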
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/website/apps/jwql/context_processors.py`
Content:
```
1 """Provides functions that define context inherent to all views.
2
3 The functions within this module define ``context`` that will be
4 included in requests, in addition to any specific ``context`` provided
5 in the view.
6
7 Authors
8 -------
9
10 - Matthew Bourque
11
12 Use
13 ---
14
15 This module is defined under the ``TEMPLATES.OPTIONS`` setting in
16 ``settings.py``, e.g.:
17 ::
18
19 TEMPLATES = [
20 {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},}
21 ]
22
23 As such, it will automatically be executed upon each request.
24 """
25
26 import jwql
27 from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS
28
29 from .oauth import auth_info
30
31
32 @auth_info
33 def base_context(request, user):
34 """Provide the context needed for the ``base.html`` template.
35
36 Parameters
37 ----------
38 request : HttpRequest object
39 Incoming request from the webpage
40
41 Returns
42 -------
43 context : dict
44 A dictionary containing data needed to render the ``base.html``
45 template
46 """
47
48 context = {}
49 context['inst_list'] = JWST_INSTRUMENT_NAMES
50 context['tools'] = MONITORS
51 context['user'] = user
52 context['version'] = jwql.__version__
53
54 return context
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jwql/website/apps/jwql/context_processors.py b/jwql/website/apps/jwql/context_processors.py
--- a/jwql/website/apps/jwql/context_processors.py
+++ b/jwql/website/apps/jwql/context_processors.py
@@ -23,6 +23,8 @@
As such, it will automatically be executed upon each request.
"""
+import bokeh
+
import jwql
from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS
@@ -50,5 +52,6 @@
context['tools'] = MONITORS
context['user'] = user
context['version'] = jwql.__version__
+ context['bokeh_version'] = bokeh.__version__
return context
|
{"golden_diff": "diff --git a/jwql/website/apps/jwql/context_processors.py b/jwql/website/apps/jwql/context_processors.py\n--- a/jwql/website/apps/jwql/context_processors.py\n+++ b/jwql/website/apps/jwql/context_processors.py\n@@ -23,6 +23,8 @@\n As such, it will automatically be executed upon each request.\n \"\"\"\n \n+import bokeh\n+\n import jwql\n from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n \n@@ -50,5 +52,6 @@\n context['tools'] = MONITORS\n context['user'] = user\n context['version'] = jwql.__version__\n+ context['bokeh_version'] = bokeh.__version__\n \n return context\n", "issue": "Import environment bokeh version in HTML\nAs described in #425, having the environment Bokeh version NOT match the HTML template Bokeh version can cause problems.\r\n\r\nI wrote some tests #429 that will test and make sure this is always the case in the future, but we could be even smarter about this if we programmatically set the Bokeh version using a Django variable. We could even include it in `context_processors` so it happens behind the scenes.\n", "before_files": [{"content": "\"\"\"Provides functions that define context inherent to all views.\n\nThe functions within this module define ``context`` that will be\nincluded in requests, in addition to any specific ``context`` provided\nin the view.\n\nAuthors\n-------\n\n - Matthew Bourque\n\nUse\n---\n\n This module is defined under the ``TEMPLATES.OPTIONS`` setting in\n ``settings.py``, e.g.:\n ::\n\n TEMPLATES = [\n {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},}\n ]\n\n As such, it will automatically be executed upon each request.\n\"\"\"\n\nimport jwql\nfrom jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n\nfrom .oauth import auth_info\n\n\n@auth_info\ndef base_context(request, user):\n \"\"\"Provide the context needed for the ``base.html`` template.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n context : dict\n A dictionary containing data needed to render the ``base.html``\n template\n \"\"\"\n\n context = {}\n context['inst_list'] = JWST_INSTRUMENT_NAMES\n context['tools'] = MONITORS\n context['user'] = user\n context['version'] = jwql.__version__\n\n return context\n", "path": "jwql/website/apps/jwql/context_processors.py"}], "after_files": [{"content": "\"\"\"Provides functions that define context inherent to all views.\n\nThe functions within this module define ``context`` that will be\nincluded in requests, in addition to any specific ``context`` provided\nin the view.\n\nAuthors\n-------\n\n - Matthew Bourque\n\nUse\n---\n\n This module is defined under the ``TEMPLATES.OPTIONS`` setting in\n ``settings.py``, e.g.:\n ::\n\n TEMPLATES = [\n {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},}\n ]\n\n As such, it will automatically be executed upon each request.\n\"\"\"\n\nimport bokeh\n\nimport jwql\nfrom jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n\nfrom .oauth import auth_info\n\n\n@auth_info\ndef base_context(request, user):\n \"\"\"Provide the context needed for the ``base.html`` template.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n context : dict\n A dictionary containing data needed to render the ``base.html``\n template\n \"\"\"\n\n context = {}\n context['inst_list'] = JWST_INSTRUMENT_NAMES\n context['tools'] = MONITORS\n context['user'] = 
user\n context['version'] = jwql.__version__\n context['bokeh_version'] = bokeh.__version__\n\n return context\n", "path": "jwql/website/apps/jwql/context_processors.py"}]}
| 759 | 177 |
gh_patches_debug_5092
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5383
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improper exception handling in manage.py
**Describe the bug**
Exceptions are not handled properly in the `initialize_db` method of the `manage` module.
**To Reproduce**
Steps to reproduce the behavior:
1. Open 'manage.py'
2. See https://github.com/fossasia/open-event-server/blob/05cbd1abee0d81dd24d701420c3530e3b9bd645f/manage.py#L77
**Expected behavior**
It should follow the standard exception-handling format, using the `Exception` class.
--- END ISSUE ---
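For reference, a small self-contained sketch of the difference the issue is pointing at; the failing function is a stand-in for the real `db.create_all()` / `stamp()` calls:
```python
def create_tables():
    # Stand-in for db.create_all() and stamp() failing.
    raise RuntimeError("database does not exist")


populate_data = True
try:
    create_tables()
except Exception as error:
    # ``except Exception`` still catches ordinary errors, but unlike a bare
    # ``except:`` it does not swallow SystemExit or KeyboardInterrupt.
    populate_data = False
    print("[LOG] Could not create tables: %s" % error)
```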
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manage.py`
Content:
```
1 import os
2
3 from app.api.helpers.db import save_to_db
4 from app.models.event import Event, get_new_event_identifier
5 from app import manager
6 from app import current_app as app
7 from app.models import db
8 from app.models.speaker import Speaker
9 from populate_db import populate
10 from flask_migrate import stamp
11 from sqlalchemy.engine import reflection
12
13 from tests.unittests.auth_helper import create_super_admin
14
15
16 @manager.command
17 def list_routes():
18 import urllib
19
20 output = []
21 for rule in app.url_map.iter_rules():
22 methods = ','.join(rule.methods)
23 line = urllib.unquote("{:50s} {:20s} {}".format(
24 rule.endpoint, methods, rule))
25 output.append(line)
26
27 for line in sorted(output):
28 print(line)
29
30
31 @manager.command
32 def add_event_identifier():
33 events = Event.query.all()
34 for event in events:
35 event.identifier = get_new_event_identifier()
36 save_to_db(event)
37
38
39 @manager.option('-e', '--event', help='Event ID. Eg. 1')
40 def fix_speaker_images(event):
41 from app.helpers.sessions_speakers.speakers import speaker_image_sizes
42 from app.helpers.sessions_speakers.speakers import save_resized_photo
43 import urllib
44 from app.helpers.storage import generate_hash
45 event_id = int(event)
46 image_sizes = speaker_image_sizes()
47 speakers = Speaker.query.filter_by(event_id=event_id).all()
48 for speaker in speakers:
49 if speaker.photo and speaker.photo.strip() != '':
50 file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'
51 file_path = app.config['BASE_DIR'] + '/' + file_relative_path
52 urllib.urlretrieve(speaker.photo, file_path)
53 speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)
54 speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)
55 speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)
56 db.session.add(speaker)
57 os.remove(file_path)
58 print("Downloaded " + speaker.photo + " into " + file_relative_path)
59 print("Processed - " + str(speaker.id))
60 db.session.commit()
61
62
63 @manager.option('-c', '--credentials', help='Super admin credentials. Eg. username:password')
64 def initialize_db(credentials):
65 with app.app_context():
66 populate_data = True
67 inspector = reflection.Inspector.from_engine(db.engine)
68 table_name = 'events'
69 table_names = inspector.get_table_names()
70 print("[LOG] Existing tables:")
71 print("[LOG] " + ','.join(table_names))
72 if table_name not in table_names:
73 print("[LOG] Table not found. Attempting creation")
74 try:
75 db.create_all()
76 stamp()
77 except:
78 populate_data = False
79 print("[LOG] Could not create tables. Either database does not exist or tables already created")
80 if populate_data:
81 credentials = credentials.split(":")
82 create_super_admin(credentials[0], credentials[1])
83 populate()
84 else:
85 print("[LOG] Tables already exist. Skipping data population & creation.")
86
87
88 @manager.command
89 def prepare_kubernetes_db():
90 with app.app_context():
91 initialize_db('[email protected]:fossasia')
92
93
94 if __name__ == "__main__":
95 manager.run()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -74,7 +74,7 @@
try:
db.create_all()
stamp()
- except:
+ except Exception:
populate_data = False
print("[LOG] Could not create tables. Either database does not exist or tables already created")
if populate_data:
|
{"golden_diff": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -74,7 +74,7 @@\n try:\n db.create_all()\n stamp()\n- except:\n+ except Exception:\n populate_data = False\n print(\"[LOG] Could not create tables. Either database does not exist or tables already created\")\n if populate_data:\n", "issue": "Improper exception handling in manage.py\n**Describe the bug**\r\nExceptions are not handled properly in `initialize_db` method of `manage` module.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open 'manage.py'\r\n2. See https://github.com/fossasia/open-event-server/blob/05cbd1abee0d81dd24d701420c3530e3b9bd645f/manage.py#L77\r\n\r\n**Expected behavior**\r\nIt should follow standard exception handling format using `Exception` class.\n", "before_files": [{"content": "import os\n\nfrom app.api.helpers.db import save_to_db\nfrom app.models.event import Event, get_new_event_identifier\nfrom app import manager\nfrom app import current_app as app\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom populate_db import populate\nfrom flask_migrate import stamp\nfrom sqlalchemy.engine import reflection\n\nfrom tests.unittests.auth_helper import create_super_admin\n\n\[email protected]\ndef list_routes():\n import urllib\n\n output = []\n for rule in app.url_map.iter_rules():\n methods = ','.join(rule.methods)\n line = urllib.unquote(\"{:50s} {:20s} {}\".format(\n rule.endpoint, methods, rule))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\[email protected]\ndef add_event_identifier():\n events = Event.query.all()\n for event in events:\n event.identifier = get_new_event_identifier()\n save_to_db(event)\n\n\[email protected]('-e', '--event', help='Event ID. Eg. 1')\ndef fix_speaker_images(event):\n from app.helpers.sessions_speakers.speakers import speaker_image_sizes\n from app.helpers.sessions_speakers.speakers import save_resized_photo\n import urllib\n from app.helpers.storage import generate_hash\n event_id = int(event)\n image_sizes = speaker_image_sizes()\n speakers = Speaker.query.filter_by(event_id=event_id).all()\n for speaker in speakers:\n if speaker.photo and speaker.photo.strip() != '':\n file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'\n file_path = app.config['BASE_DIR'] + '/' + file_relative_path\n urllib.urlretrieve(speaker.photo, file_path)\n speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)\n speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)\n speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)\n db.session.add(speaker)\n os.remove(file_path)\n print(\"Downloaded \" + speaker.photo + \" into \" + file_relative_path)\n print(\"Processed - \" + str(speaker.id))\n db.session.commit()\n\n\[email protected]('-c', '--credentials', help='Super admin credentials. Eg. username:password')\ndef initialize_db(credentials):\n with app.app_context():\n populate_data = True\n inspector = reflection.Inspector.from_engine(db.engine)\n table_name = 'events'\n table_names = inspector.get_table_names()\n print(\"[LOG] Existing tables:\")\n print(\"[LOG] \" + ','.join(table_names))\n if table_name not in table_names:\n print(\"[LOG] Table not found. Attempting creation\")\n try:\n db.create_all()\n stamp()\n except:\n populate_data = False\n print(\"[LOG] Could not create tables. 
Either database does not exist or tables already created\")\n if populate_data:\n credentials = credentials.split(\":\")\n create_super_admin(credentials[0], credentials[1])\n populate()\n else:\n print(\"[LOG] Tables already exist. Skipping data population & creation.\")\n\n\[email protected]\ndef prepare_kubernetes_db():\n with app.app_context():\n initialize_db('[email protected]:fossasia')\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}], "after_files": [{"content": "import os\n\nfrom app.api.helpers.db import save_to_db\nfrom app.models.event import Event, get_new_event_identifier\nfrom app import manager\nfrom app import current_app as app\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom populate_db import populate\nfrom flask_migrate import stamp\nfrom sqlalchemy.engine import reflection\n\nfrom tests.unittests.auth_helper import create_super_admin\n\n\[email protected]\ndef list_routes():\n import urllib\n\n output = []\n for rule in app.url_map.iter_rules():\n methods = ','.join(rule.methods)\n line = urllib.unquote(\"{:50s} {:20s} {}\".format(\n rule.endpoint, methods, rule))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\[email protected]\ndef add_event_identifier():\n events = Event.query.all()\n for event in events:\n event.identifier = get_new_event_identifier()\n save_to_db(event)\n\n\[email protected]('-e', '--event', help='Event ID. Eg. 1')\ndef fix_speaker_images(event):\n from app.helpers.sessions_speakers.speakers import speaker_image_sizes\n from app.helpers.sessions_speakers.speakers import save_resized_photo\n import urllib\n from app.helpers.storage import generate_hash\n event_id = int(event)\n image_sizes = speaker_image_sizes()\n speakers = Speaker.query.filter_by(event_id=event_id).all()\n for speaker in speakers:\n if speaker.photo and speaker.photo.strip() != '':\n file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'\n file_path = app.config['BASE_DIR'] + '/' + file_relative_path\n urllib.urlretrieve(speaker.photo, file_path)\n speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)\n speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)\n speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)\n db.session.add(speaker)\n os.remove(file_path)\n print(\"Downloaded \" + speaker.photo + \" into \" + file_relative_path)\n print(\"Processed - \" + str(speaker.id))\n db.session.commit()\n\n\[email protected]('-c', '--credentials', help='Super admin credentials. Eg. username:password')\ndef initialize_db(credentials):\n with app.app_context():\n populate_data = True\n inspector = reflection.Inspector.from_engine(db.engine)\n table_name = 'events'\n table_names = inspector.get_table_names()\n print(\"[LOG] Existing tables:\")\n print(\"[LOG] \" + ','.join(table_names))\n if table_name not in table_names:\n print(\"[LOG] Table not found. Attempting creation\")\n try:\n db.create_all()\n stamp()\n except Exception:\n populate_data = False\n print(\"[LOG] Could not create tables. Either database does not exist or tables already created\")\n if populate_data:\n credentials = credentials.split(\":\")\n create_super_admin(credentials[0], credentials[1])\n populate()\n else:\n print(\"[LOG] Tables already exist. 
Skipping data population & creation.\")\n\n\[email protected]\ndef prepare_kubernetes_db():\n with app.app_context():\n initialize_db('[email protected]:fossasia')\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}]}
| 1,296 | 83 |
gh_patches_debug_1067
|
rasdani/github-patches
|
git_diff
|
encode__httpx-1799
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update h2 pin?
### Discussed in https://github.com/encode/httpx/discussions/1485
<div type='discussions-op-text'>
<sup>Originally posted by **HarrySky** February 24, 2021</sup>
Hi, some time ago `h2` pin was updated in `httpcore`: https://github.com/encode/httpcore/pull/208
But it is still pinned to `3.*` in this package's `http2` extra:
https://github.com/encode/httpx/blob/0f280af8b170ed5cc48c12a894f71a8b5762f748/setup.py#L65
This is not an issue, as I can just add `h2>=3,<5` to my setup.py instead of using `httpx[http2]`, but maybe you want dependencies to be in sync with `httpcore`.
EDIT: Using git blame we can see that before `http2` extra - `httpcore[http2]` was used instead of `h2` dependency directly.</div>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 project_urls={
44 "Changelog": "https://github.com/encode/httpx/blob/master/CHANGELOG.md",
45 "Documentation": "https://www.python-httpx.org",
46 "Source": "https://github.com/encode/httpx",
47 },
48 license="BSD",
49 description="The next generation HTTP client.",
50 long_description=get_long_description(),
51 long_description_content_type="text/markdown",
52 author="Tom Christie",
53 author_email="[email protected]",
54 package_data={"httpx": ["py.typed"]},
55 packages=get_packages("httpx"),
56 include_package_data=True,
57 zip_safe=False,
58 install_requires=[
59 "certifi",
60 "sniffio",
61 "rfc3986[idna2008]>=1.3,<2",
62 "httpcore>=0.13.3,<0.14.0",
63 "async_generator; python_version < '3.7'"
64 ],
65 extras_require={
66 "http2": "h2==3.*",
67 "brotli": "brotlicffi==1.*",
68 },
69 classifiers=[
70 "Development Status :: 4 - Beta",
71 "Environment :: Web Environment",
72 "Intended Audience :: Developers",
73 "License :: OSI Approved :: BSD License",
74 "Operating System :: OS Independent",
75 "Topic :: Internet :: WWW/HTTP",
76 "Framework :: AsyncIO",
77 "Framework :: Trio",
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.6",
80 "Programming Language :: Python :: 3.7",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 "Programming Language :: Python :: 3 :: Only",
85 ],
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,7 @@
"async_generator; python_version < '3.7'"
],
extras_require={
- "http2": "h2==3.*",
+ "http2": "h2>=3,<5",
"brotli": "brotlicffi==1.*",
},
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n- \"http2\": \"h2==3.*\",\n+ \"http2\": \"h2>=3,<5\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n", "issue": "Update h2 pin?\n### Discussed in https://github.com/encode/httpx/discussions/1485\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **HarrySky** February 24, 2021</sup>\r\nHi, some time ago `h2` pin was updated in `httpcore`: https://github.com/encode/httpcore/pull/208\r\n\r\nBut it is still pinned to `3.*` in this package's `http2` extra:\r\nhttps://github.com/encode/httpx/blob/0f280af8b170ed5cc48c12a894f71a8b5762f748/setup.py#L65\r\n\r\nThis is not an issue, as I can just add `h2>=3,<5` to my setup.py instead of using `httpx[http2]`, but maybe you want dependencies to be in sync with `httpcore`.\r\n\r\nEDIT: Using git blame we can see that before `http2` extra - `httpcore[http2]` was used instead of `h2` dependency directly.</div>\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n project_urls={\n \"Changelog\": \"https://github.com/encode/httpx/blob/master/CHANGELOG.md\",\n \"Documentation\": \"https://www.python-httpx.org\",\n \"Source\": \"https://github.com/encode/httpx\",\n },\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.13.3,<0.14.0\",\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n \"http2\": \"h2==3.*\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n 
],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n project_urls={\n \"Changelog\": \"https://github.com/encode/httpx/blob/master/CHANGELOG.md\",\n \"Documentation\": \"https://www.python-httpx.org\",\n \"Source\": \"https://github.com/encode/httpx\",\n },\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.13.3,<0.14.0\",\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n \"http2\": \"h2>=3,<5\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n", "path": "setup.py"}]}
| 1,315 | 98 |
gh_patches_debug_15412
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1928
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AZURE_53 is a duplicate of CKV_AZURE_90
**Is your feature request related to a problem? Please describe.**
CKV_AZURE_53 is a duplicate of CKV_AZURE_90. We should use only one.
**Describe the solution you'd like**
N/A
**Describe alternatives you've considered**
N/A
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that MySQL server disables public network access"
8 id = "CKV_AZURE_90"
9 supported_resources = ['azurerm_mysql_server']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'public_network_access_enabled'
15
16 def scan_resource_conf(self, conf):
17 public_access = conf.get('public_network_access_enabled', [True])
18 if public_access[0]:
19 return CheckResult.FAILED
20 return CheckResult.PASSED
21
22
23 check = MySQLServerHasPublicAccessDisabled()
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py b/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from checkov.common.models.enums import CheckCategories, CheckResult
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-
-
-class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that MySQL server disables public network access"
- id = "CKV_AZURE_90"
- supported_resources = ['azurerm_mysql_server']
- categories = [CheckCategories.NETWORKING]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return 'public_network_access_enabled'
-
- def scan_resource_conf(self, conf):
- public_access = conf.get('public_network_access_enabled', [True])
- if public_access[0]:
- return CheckResult.FAILED
- return CheckResult.PASSED
-
-
-check = MySQLServerHasPublicAccessDisabled()
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py b/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-from checkov.common.models.enums import CheckCategories, CheckResult\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-\n-\n-class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that MySQL server disables public network access\"\n- id = \"CKV_AZURE_90\"\n- supported_resources = ['azurerm_mysql_server']\n- categories = [CheckCategories.NETWORKING]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return 'public_network_access_enabled'\n-\n- def scan_resource_conf(self, conf):\n- public_access = conf.get('public_network_access_enabled', [True])\n- if public_access[0]:\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n-\n-\n-check = MySQLServerHasPublicAccessDisabled()\n", "issue": "CKV_AZURE_53 is a duplicate of CKV_AZURE_90\n**Is your feature request related to a problem? Please describe.**\r\nCKV_AZURE_53 is a duplicate of CKV_AZURE_90. We should use only one.\r\n\r\n**Describe the solution you'd like**\r\nN/A\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that MySQL server disables public network access\"\n id = \"CKV_AZURE_90\"\n supported_resources = ['azurerm_mysql_server']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'public_network_access_enabled'\n\n def scan_resource_conf(self, conf):\n public_access = conf.get('public_network_access_enabled', [True])\n if public_access[0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = MySQLServerHasPublicAccessDisabled()\n", "path": "checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py"}]}
| 595 | 293 |
gh_patches_debug_196
|
rasdani/github-patches
|
git_diff
|
twisted__twisted-11622
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 22.8.0
This is the ticket to track the release of 22.8.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/_version.py`
Content:
```
1 """
2 Provides Twisted version information.
3 """
4
5 # This file is auto-generated! Do not edit!
6 # Use `python -m incremental.update Twisted` to change this file.
7
8 from incremental import Version
9
10 __version__ = Version("Twisted", 22, 4, 0, post=0)
11 __all__ = ["__version__"]
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/twisted/_version.py b/src/twisted/_version.py
--- a/src/twisted/_version.py
+++ b/src/twisted/_version.py
@@ -7,5 +7,5 @@
from incremental import Version
-__version__ = Version("Twisted", 22, 4, 0, post=0)
+__version__ = Version("Twisted", 22, 8, 0, post=0)
__all__ = ["__version__"]
|
{"golden_diff": "diff --git a/src/twisted/_version.py b/src/twisted/_version.py\n--- a/src/twisted/_version.py\n+++ b/src/twisted/_version.py\n@@ -7,5 +7,5 @@\n \n from incremental import Version\n \n-__version__ = Version(\"Twisted\", 22, 4, 0, post=0)\n+__version__ = Version(\"Twisted\", 22, 8, 0, post=0)\n __all__ = [\"__version__\"]\n", "issue": "Release 22.8.0\nThis is the ticket to track the release of 22.8.0\n", "before_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 4, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}], "after_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 8, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}]}
| 377 | 113 |
gh_patches_debug_24456
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1096
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate check
**Describe the bug**
The new check `CKV_AWS_125` popped up complaining about me not activating point in time recovery for a DynamoDB table, which is surprising, because I already acknowledged it in the past. After digging into the code I found out that check `CKV_AWS_125` and `CKV_AWS_28` are identical. It would be great to delete one of them, probably the new one.
**Additional context**
https://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbRecovery.py#L5-L14
https://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py#L5-L17
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that that point in time recovery is enabled for Amazon DynamoDB tables"
8 id = "CKV_AWS_125"
9 supported_resources = ['aws_dynamodb_table']
10 categories = [CheckCategories.BACKUP_AND_RECOVERY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "point_in_time_recovery/[0]/enabled"
15
16 def get_expected_value(self):
17 return True
18
19
20 check = DynamodbPointInTimeRecoveryEnabled()
21
```
Path: `checkov/terraform/checks/resource/aws/KMSKeyRotation.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3
4
5 class KMSKeyRotation(BaseResourceValueCheck):
6
7 def __init__(self):
8 name = "Ensure KMS have rotation policy"
9 id = "CKV_AWS_132"
10 supported_resources = ['aws_kms_key']
11 categories = [CheckCategories.ENCRYPTION]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "enable_key_rotation"
16
17
18 check = KMSKeyRotation()
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py b/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from checkov.common.models.enums import CheckCategories, CheckResult
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-
-
-class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that that point in time recovery is enabled for Amazon DynamoDB tables"
- id = "CKV_AWS_125"
- supported_resources = ['aws_dynamodb_table']
- categories = [CheckCategories.BACKUP_AND_RECOVERY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return "point_in_time_recovery/[0]/enabled"
-
- def get_expected_value(self):
- return True
-
-
-check = DynamodbPointInTimeRecoveryEnabled()
diff --git a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py b/checkov/terraform/checks/resource/aws/KMSKeyRotation.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class KMSKeyRotation(BaseResourceValueCheck):
-
- def __init__(self):
- name = "Ensure KMS have rotation policy"
- id = "CKV_AWS_132"
- supported_resources = ['aws_kms_key']
- categories = [CheckCategories.ENCRYPTION]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return "enable_key_rotation"
-
-
-check = KMSKeyRotation()
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py b/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-from checkov.common.models.enums import CheckCategories, CheckResult\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-\n-\n-class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that that point in time recovery is enabled for Amazon DynamoDB tables\"\n- id = \"CKV_AWS_125\"\n- supported_resources = ['aws_dynamodb_table']\n- categories = [CheckCategories.BACKUP_AND_RECOVERY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"point_in_time_recovery/[0]/enabled\"\n-\n- def get_expected_value(self):\n- return True\n-\n-\n-check = DynamodbPointInTimeRecoveryEnabled()\ndiff --git a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py b/checkov/terraform/checks/resource/aws/KMSKeyRotation.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py\n+++ /dev/null\n@@ -1,18 +0,0 @@\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class KMSKeyRotation(BaseResourceValueCheck):\n-\n- def __init__(self):\n- name = \"Ensure KMS have rotation policy\"\n- id = \"CKV_AWS_132\"\n- supported_resources = ['aws_kms_key']\n- categories = [CheckCategories.ENCRYPTION]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"enable_key_rotation\"\n-\n-\n-check = KMSKeyRotation()\n", "issue": "Duplicate check\n**Describe the bug**\r\nThe new check `CKV_AWS_125` popped up complaining about me not activating point in time recovery for a DynamoDB table, which is surprising, because I already acknowledged it in the past. After digging into the code I found out that check `CKV_AWS_125` and `CKV_AWS_28` are identical. 
It would be great to delete one of them, probably the new one.\r\n\r\n**Additional context**\r\nhttps://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbRecovery.py#L5-L14\r\nhttps://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py#L5-L17\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that that point in time recovery is enabled for Amazon DynamoDB tables\"\n id = \"CKV_AWS_125\"\n supported_resources = ['aws_dynamodb_table']\n categories = [CheckCategories.BACKUP_AND_RECOVERY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"point_in_time_recovery/[0]/enabled\"\n\n def get_expected_value(self):\n return True\n\n\ncheck = DynamodbPointInTimeRecoveryEnabled()\n", "path": "checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass KMSKeyRotation(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure KMS have rotation policy\"\n id = \"CKV_AWS_132\"\n supported_resources = ['aws_kms_key']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"enable_key_rotation\"\n\n\ncheck = KMSKeyRotation()\n", "path": "checkov/terraform/checks/resource/aws/KMSKeyRotation.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py"}, {"content": null, "path": "checkov/terraform/checks/resource/aws/KMSKeyRotation.py"}]}
| 912 | 493 |
gh_patches_debug_6160
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-2741
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: Error during running chatgpt train_rm.sh
### 🐛 Describe the bug
I run into a runtime error when running `train_rm.sh` under examples of chatgpt.
```
Traceback (most recent call last):
File "/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py", line 53, in <module>
train(args)
File "/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py", line 36, in train
trainer.fit(use_lora=args.lora_rank)
File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/chatgpt/trainer/rm.py", line 57, in fit
loss.backward()
File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
```
The model is downloaded from Huggingface `bigscience/bloom-560m`. `lora_rank` is `16`. Is there any hints on this?
### Environment
Environment:
torch 1.13.1+cu113
------------ Environment ------------
Colossal-AI version: 0.2.4
PyTorch version: 1.13.1
CUDA version: 11.3
CUDA version required by PyTorch: 11.7
Note:
1. The table above checks the versions of the libraries/tools in the current environment
2. If the CUDA version is N/A, you can set the CUDA_HOME environment variable to locate it
------------ CUDA Extensions AOT Compilation ------------
Found AOT CUDA Extension: x
PyTorch version used for AOT compilation: N/A
CUDA version used for AOT compilation: N/A
Note:
1. AOT (ahead-of-time) compilation of the CUDA kernels occurs during installation when the environment varialbe CUDA_EXT=1 is set
2. If AOT compilation is not enabled, stay calm as the CUDA kernels can still be built during runtime
------------ Compatibility ------------
PyTorch version match: N/A
System and PyTorch CUDA version match: x
System and Colossal-AI CUDA version match: N/A
Note:
1. The table above checks the version compatibility of the libraries/tools in the current environment
- PyTorch version mistach: whether the PyTorch version in the current environment is compatible with the PyTorch version used for AOT compilation
- System and PyTorch CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version required by PyTorch
- System and Colossal-AI CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version used for AOT compilation
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/chatgpt/trainer/rm.py`
Content:
```
1 from abc import ABC
2
3 import loralib as lora
4 from chatgpt.dataset import RewardDataset
5 from chatgpt.nn import PairWiseLoss
6 from torch.optim import Adam
7 from torch.utils.data import DataLoader
8 from tqdm import tqdm
9
10
11 class RewardModelTrainer(ABC):
12 """
13 Trainer to use while training reward model.
14
15 Args:
16 model (torch.nn.Module): the model to train
17 train_dataset (RewardDataset): the dataset to use for training
18 eval_dataset (RewardDataset): the dataset to use for evaluation
19 batch_size (int, defaults to 1): the batch size while training
20 num_epochs (int, defaults to 2): the number of epochs to train
21 optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer
22 """
23
24 def __init__(self,
25 model,
26 train_dataset: RewardDataset,
27 eval_dataset: RewardDataset,
28 batch_size: int = 1,
29 num_epochs: int = 2,
30 optim_kwargs: dict = {'lr': 1e-4}) -> None:
31 super().__init__()
32 self.model = model
33 self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)
34 self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)
35 self.loss_fn = PairWiseLoss()
36 self.optimizer = Adam(self.model.parameters(), **optim_kwargs)
37 self.epochs = num_epochs
38
39 def fit(self, use_lora):
40 epoch_bar = tqdm(range(self.epochs), desc='Train epoch')
41 for epoch in range(self.epochs):
42 step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch)
43 # train
44 if use_lora > 0:
45 print("Using Lora")
46 lora.mark_only_lora_as_trainable(self.model)
47 else:
48 self.model.train()
49 for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:
50 chosen_ids = chosen_ids.squeeze(1).cuda()
51 c_mask = c_mask.squeeze(1).cuda()
52 reject_ids = reject_ids.squeeze(1).cuda()
53 r_mask = r_mask.squeeze(1).cuda()
54 chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
55 reject_reward = self.model(reject_ids, attention_mask=r_mask)
56 loss = self.loss_fn(chosen_reward, reject_reward)
57 loss.backward()
58 self.optimizer.step()
59 self.optimizer.zero_grad()
60 step_bar.update()
61 step_bar.set_postfix({'loss': loss.item()})
62
63 # eval
64 self.model.eval()
65 for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:
66 dist = 0
67 chosen_ids = chosen_ids.squeeze(1).cuda()
68 c_mask = c_mask.squeeze(1).cuda()
69 reject_ids = reject_ids.squeeze(1).cuda()
70 r_mask = r_mask.squeeze(1).cuda()
71 chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
72 reject_reward = self.model(reject_ids, attention_mask=r_mask)
73 dist += (chosen_reward - reject_reward)
74 dist_mean = dist / self.eval_dataloader.__len__()
75 epoch_bar.update()
76 step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()})
77 step_bar.close()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py
--- a/applications/ChatGPT/chatgpt/trainer/rm.py
+++ b/applications/ChatGPT/chatgpt/trainer/rm.py
@@ -43,7 +43,7 @@
# train
if use_lora > 0:
print("Using Lora")
- lora.mark_only_lora_as_trainable(self.model)
+ lora.mark_only_lora_as_trainable(self.model.model)
else:
self.model.train()
for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:
|
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py\n--- a/applications/ChatGPT/chatgpt/trainer/rm.py\n+++ b/applications/ChatGPT/chatgpt/trainer/rm.py\n@@ -43,7 +43,7 @@\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n- lora.mark_only_lora_as_trainable(self.model)\n+ lora.mark_only_lora_as_trainable(self.model.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n", "issue": "[BUG]: Error during running chatgpt train_rm.sh \n### \ud83d\udc1b Describe the bug\n\nI run into a runtime error when running `train_rm.sh` under examples of chatgpt.\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py\", line 53, in <module>\r\n train(args)\r\n File \"/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py\", line 36, in train\r\n trainer.fit(use_lora=args.lora_rank)\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/chatgpt/trainer/rm.py\", line 57, in fit\r\n loss.backward()\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/_tensor.py\", line 488, in backward\r\n torch.autograd.backward(\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/autograd/__init__.py\", line 197, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\r\n```\r\n\r\n\r\nThe model is downloaded from Huggingface `bigscience/bloom-560m`. `lora_rank` is `16`. Is there any hints on this?\r\n\n\n### Environment\n\nEnvironment:\r\ntorch 1.13.1+cu113\r\n\r\n------------ Environment ------------\r\nColossal-AI version: 0.2.4\r\nPyTorch version: 1.13.1\r\nCUDA version: 11.3\r\nCUDA version required by PyTorch: 11.7\r\n\r\nNote:\r\n1. The table above checks the versions of the libraries/tools in the current environment\r\n2. If the CUDA version is N/A, you can set the CUDA_HOME environment variable to locate it\r\n\r\n------------ CUDA Extensions AOT Compilation ------------\r\nFound AOT CUDA Extension: x\r\nPyTorch version used for AOT compilation: N/A\r\nCUDA version used for AOT compilation: N/A\r\n\r\nNote:\r\n1. AOT (ahead-of-time) compilation of the CUDA kernels occurs during installation when the environment varialbe CUDA_EXT=1 is set\r\n2. If AOT compilation is not enabled, stay calm as the CUDA kernels can still be built during runtime\r\n\r\n------------ Compatibility ------------\r\nPyTorch version match: N/A\r\nSystem and PyTorch CUDA version match: x\r\nSystem and Colossal-AI CUDA version match: N/A\r\n\r\nNote:\r\n1. 
The table above checks the version compatibility of the libraries/tools in the current environment\r\n - PyTorch version mistach: whether the PyTorch version in the current environment is compatible with the PyTorch version used for AOT compilation\r\n - System and PyTorch CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version required by PyTorch\r\n - System and Colossal-AI CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version used for AOT compilation\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC\n\nimport loralib as lora\nfrom chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import PairWiseLoss\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nclass RewardModelTrainer(ABC):\n \"\"\"\n Trainer to use while training reward model.\n\n Args:\n model (torch.nn.Module): the model to train\n train_dataset (RewardDataset): the dataset to use for training\n eval_dataset (RewardDataset): the dataset to use for evaluation\n batch_size (int, defaults to 1): the batch size while training\n num_epochs (int, defaults to 2): the number of epochs to train\n optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer\n \"\"\"\n\n def __init__(self,\n model,\n train_dataset: RewardDataset,\n eval_dataset: RewardDataset,\n batch_size: int = 1,\n num_epochs: int = 2,\n optim_kwargs: dict = {'lr': 1e-4}) -> None:\n super().__init__()\n self.model = model\n self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n self.loss_fn = PairWiseLoss()\n self.optimizer = Adam(self.model.parameters(), **optim_kwargs)\n self.epochs = num_epochs\n\n def fit(self, use_lora):\n epoch_bar = tqdm(range(self.epochs), desc='Train epoch')\n for epoch in range(self.epochs):\n step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch)\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n lora.mark_only_lora_as_trainable(self.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n loss = self.loss_fn(chosen_reward, reject_reward)\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n step_bar.update()\n step_bar.set_postfix({'loss': loss.item()})\n\n # eval\n self.model.eval()\n for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:\n dist = 0\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n dist += (chosen_reward - reject_reward)\n dist_mean = dist / self.eval_dataloader.__len__()\n epoch_bar.update()\n step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()})\n step_bar.close()\n", "path": "applications/ChatGPT/chatgpt/trainer/rm.py"}], "after_files": [{"content": "from abc import ABC\n\nimport loralib as lora\nfrom 
chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import PairWiseLoss\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nclass RewardModelTrainer(ABC):\n \"\"\"\n Trainer to use while training reward model.\n\n Args:\n model (torch.nn.Module): the model to train\n train_dataset (RewardDataset): the dataset to use for training\n eval_dataset (RewardDataset): the dataset to use for evaluation\n batch_size (int, defaults to 1): the batch size while training\n num_epochs (int, defaults to 2): the number of epochs to train\n optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer\n \"\"\"\n\n def __init__(self,\n model,\n train_dataset: RewardDataset,\n eval_dataset: RewardDataset,\n batch_size: int = 1,\n num_epochs: int = 2,\n optim_kwargs: dict = {'lr': 1e-4}) -> None:\n super().__init__()\n self.model = model\n self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n self.loss_fn = PairWiseLoss()\n self.optimizer = Adam(self.model.parameters(), **optim_kwargs)\n self.epochs = num_epochs\n\n def fit(self, use_lora):\n epoch_bar = tqdm(range(self.epochs), desc='Train epoch')\n for epoch in range(self.epochs):\n step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch)\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n lora.mark_only_lora_as_trainable(self.model.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n loss = self.loss_fn(chosen_reward, reject_reward)\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n step_bar.update()\n step_bar.set_postfix({'loss': loss.item()})\n\n # eval\n self.model.eval()\n for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:\n dist = 0\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n dist += (chosen_reward - reject_reward)\n dist_mean = dist / self.eval_dataloader.__len__()\n epoch_bar.update()\n step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()})\n step_bar.close()\n", "path": "applications/ChatGPT/chatgpt/trainer/rm.py"}]}
| 1,842 | 156 |
gh_patches_debug_730
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-116517
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing packaging dependency in torch 2.1.x
### 🐛 Describe the bug
Hi,
[torch.utils.tensorboard requires "packaging"](https://github.com/pytorch/pytorch/blob/fa1ccc34c4f65756bc50c3e3ab135c88b175b18c/torch/utils/tensorboard/__init__.py#L2C1-L3C1) to be installed but that dependency is [missing on torch 2.1.x](https://github.com/pytorch/pytorch/blob/v2.1.2-rc1/requirements.txt).
Here's some example code:
```python
from torch.utils.tensorboard import SummaryWriter
```
The links above point to a RC version of 2.1.2 but this is also the case for 2.1.1. Would it be possible to make a patch release to add the dependency?
### Versions
Python version: 3.9.16 (main, Dec 7 2022, 10:16:11) [Clang 14.0.0 (clang-140[0.0.29.202](http://0.0.29.202/)
)] (64-bit runtime)
Python platform: macOS-13.6.1-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Intel(R) Core(TM) i5-7287U CPU @ 3.30GHz
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] torch==2.1.1
[pip3] torchvision==0.16.1
[conda] Could not collect
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/utils/tensorboard/__init__.py`
Content:
```
1 import tensorboard
2 from packaging.version import Version
3
4 if not hasattr(tensorboard, "__version__") or Version(
5 tensorboard.__version__
6 ) < Version("1.15"):
7 raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above")
8
9 del Version
10 del tensorboard
11
12 from .writer import FileWriter, SummaryWriter # noqa: F401
13 from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/utils/tensorboard/__init__.py b/torch/utils/tensorboard/__init__.py
--- a/torch/utils/tensorboard/__init__.py
+++ b/torch/utils/tensorboard/__init__.py
@@ -1,5 +1,5 @@
import tensorboard
-from packaging.version import Version
+from torch._vendor.packaging.version import Version
if not hasattr(tensorboard, "__version__") or Version(
tensorboard.__version__
|
{"golden_diff": "diff --git a/torch/utils/tensorboard/__init__.py b/torch/utils/tensorboard/__init__.py\n--- a/torch/utils/tensorboard/__init__.py\n+++ b/torch/utils/tensorboard/__init__.py\n@@ -1,5 +1,5 @@\n import tensorboard\n-from packaging.version import Version\n+from torch._vendor.packaging.version import Version\n \n if not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n", "issue": "Missing packaging dependency in torch 2.1.x\n### \ud83d\udc1b Describe the bug\n\nHi,\r\n[torch.utils.tensorboard requires \"packaging\"](https://github.com/pytorch/pytorch/blob/fa1ccc34c4f65756bc50c3e3ab135c88b175b18c/torch/utils/tensorboard/__init__.py#L2C1-L3C1) to be installed but that dependency is [missing on torch 2.1.x](https://github.com/pytorch/pytorch/blob/v2.1.2-rc1/requirements.txt).\r\n\r\nHere's some example code:\r\n```python\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n```\r\n\r\nThe links above point to a RC version of 2.1.2 but this is also the case for 2.1.1. Would it be possible to make a patch release to add the dependency?\n\n### Versions\n\nPython version: 3.9.16 (main, Dec 7 2022, 10:16:11) [Clang 14.0.0 (clang-140[0.0.29.202](http://0.0.29.202/)\r\n)] (64-bit runtime)\r\nPython platform: macOS-13.6.1-x86_64-i386-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nIntel(R) Core(TM) i5-7287U CPU @ 3.30GHz\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.23.5\r\n[pip3] torch==2.1.1\r\n[pip3] torchvision==0.16.1\r\n[conda] Could not collect\n", "before_files": [{"content": "import tensorboard\nfrom packaging.version import Version\n\nif not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n) < Version(\"1.15\"):\n raise ImportError(\"TensorBoard logging requires TensorBoard version 1.15 or above\")\n\ndel Version\ndel tensorboard\n\nfrom .writer import FileWriter, SummaryWriter # noqa: F401\nfrom tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401\n", "path": "torch/utils/tensorboard/__init__.py"}], "after_files": [{"content": "import tensorboard\nfrom torch._vendor.packaging.version import Version\n\nif not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n) < Version(\"1.15\"):\n raise ImportError(\"TensorBoard logging requires TensorBoard version 1.15 or above\")\n\ndel Version\ndel tensorboard\n\nfrom .writer import FileWriter, SummaryWriter # noqa: F401\nfrom tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401\n", "path": "torch/utils/tensorboard/__init__.py"}]}
| 840 | 102 |
gh_patches_debug_21259
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-914
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
desktop profiles, replace pipewire-media-session with wireplumber, wireplumber seembs to be the newest replacement media-session replaceing pipewire-media-session
hi,
would it be possible to replace pipewire-media-session with wireplumber?
or, to make things flexable,
add a question in asking,
"would you like to use wireplumber or pipewire-media-session?
p.s
i'm using wireplumber currently.
wireplumber will remove pipewire-media-session and the systemd units.
a total replacement.
thank you for considdering my request!
Majid Hussain
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/applications/pipewire.py`
Content:
```
1 import archinstall
2 import logging
3
4 # Define the package list in order for lib to source
5 # which packages will be installed by this profile
6 __packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-media-session", "pipewire-pulse", "gst-plugin-pipewire", "libpulse"]
7
8 archinstall.log('Installing pipewire', level=logging.INFO)
9 archinstall.storage['installation_session'].add_additional_packages(__packages__)
10
11 @archinstall.plugin
12 def on_user_created(installation :archinstall.Installer, user :str):
13 archinstall.log(f"Enabling pipewire-pulse for {user}", level=logging.INFO)
14 installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/profiles/applications/pipewire.py b/profiles/applications/pipewire.py
--- a/profiles/applications/pipewire.py
+++ b/profiles/applications/pipewire.py
@@ -3,7 +3,7 @@
# Define the package list in order for lib to source
# which packages will be installed by this profile
-__packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-media-session", "pipewire-pulse", "gst-plugin-pipewire", "libpulse"]
+__packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-pulse", "gst-plugin-pipewire", "libpulse", "wireplumber"]
archinstall.log('Installing pipewire', level=logging.INFO)
archinstall.storage['installation_session'].add_additional_packages(__packages__)
@@ -11,4 +11,4 @@
@archinstall.plugin
def on_user_created(installation :archinstall.Installer, user :str):
archinstall.log(f"Enabling pipewire-pulse for {user}", level=logging.INFO)
- installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)
\ No newline at end of file
+ installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)
|
{"golden_diff": "diff --git a/profiles/applications/pipewire.py b/profiles/applications/pipewire.py\n--- a/profiles/applications/pipewire.py\n+++ b/profiles/applications/pipewire.py\n@@ -3,7 +3,7 @@\n \n # Define the package list in order for lib to source\n # which packages will be installed by this profile\n-__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-media-session\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\"]\n+__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\", \"wireplumber\"]\n \n archinstall.log('Installing pipewire', level=logging.INFO)\n archinstall.storage['installation_session'].add_additional_packages(__packages__)\n@@ -11,4 +11,4 @@\n @archinstall.plugin\n def on_user_created(installation :archinstall.Installer, user :str):\n \tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n-\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n\\ No newline at end of file\n+\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n", "issue": "desktop profiles, replace pipewire-media-session with wireplumber, wireplumber seembs to be the newest replacement media-session replaceing pipewire-media-session\nhi,\r\nwould it be possible to replace pipewire-media-session with wireplumber?\r\nor, to make things flexable,\r\nadd a question in asking,\r\n\"would you like to use wireplumber or pipewire-media-session?\r\np.s\r\ni'm using wireplumber currently.\r\nwireplumber will remove pipewire-media-session and the systemd units.\r\na total replacement.\r\nthank you for considdering my request!\r\nMajid Hussain\n", "before_files": [{"content": "import archinstall\nimport logging\n\n# Define the package list in order for lib to source\n# which packages will be installed by this profile\n__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-media-session\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\"]\n\narchinstall.log('Installing pipewire', level=logging.INFO)\narchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\[email protected]\ndef on_user_created(installation :archinstall.Installer, user :str):\n\tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)", "path": "profiles/applications/pipewire.py"}], "after_files": [{"content": "import archinstall\nimport logging\n\n# Define the package list in order for lib to source\n# which packages will be installed by this profile\n__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\", \"wireplumber\"]\n\narchinstall.log('Installing pipewire', level=logging.INFO)\narchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\[email protected]\ndef on_user_created(installation :archinstall.Installer, user :str):\n\tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n", "path": "profiles/applications/pipewire.py"}]}
| 565 | 295 |