problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_13449 | rasdani/github-patches | git_diff | cloudtools__troposphere-178 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cloudwatch Alarm Threshold Type
The parameter type 'Threshold' within Cloudwatch Alarms is currently of type 'integer' whereas the AWS documentations notes this should be a String.
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-threshold
I am hitting an issue when using alarms to check instance health - to monitor StatusCheckFailed I have an implementation which sets Threshold to 0.5 to evaluate a healthcheck of sorts. This works in Cloudformation but fails when I try to use it in the troposphere code.
I think the line 'Threshold': (integer, True), should be 'Threshold': (basestring, True), within cloudwatch.py
Any thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/cloudwatch.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Ref
7 from .validators import integer, positive_integer, boolean
8
9
10 class MetricDimension(AWSProperty):
11 props = {
12 'Name': (basestring, True),
13 'Value': (basestring, True),
14 }
15
16
17 class Alarm(AWSObject):
18 resource_type = "AWS::CloudWatch::Alarm"
19
20 props = {
21 'ActionsEnabled': (boolean, False),
22 'AlarmActions': ([basestring, Ref], False),
23 'AlarmDescription': (basestring, False),
24 'AlarmName': (basestring, False),
25 'ComparisonOperator': (basestring, True),
26 'Dimensions': ([MetricDimension], False),
27 'EvaluationPeriods': (positive_integer, True),
28 'InsufficientDataActions': ([basestring, Ref], False),
29 'MetricName': (basestring, True),
30 'Namespace': (basestring, True),
31 'OKActions': ([basestring, Ref], False),
32 'Period': (positive_integer, True),
33 'Statistic': (basestring, True),
34 'Threshold': (integer, True),
35 'Unit': (basestring, False),
36 }
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/cloudwatch.py b/troposphere/cloudwatch.py
--- a/troposphere/cloudwatch.py
+++ b/troposphere/cloudwatch.py
@@ -4,7 +4,7 @@
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Ref
-from .validators import integer, positive_integer, boolean
+from .validators import positive_integer, boolean
class MetricDimension(AWSProperty):
@@ -31,6 +31,6 @@
'OKActions': ([basestring, Ref], False),
'Period': (positive_integer, True),
'Statistic': (basestring, True),
- 'Threshold': (integer, True),
+ 'Threshold': (basestring, True),
'Unit': (basestring, False),
}
|
{"golden_diff": "diff --git a/troposphere/cloudwatch.py b/troposphere/cloudwatch.py\n--- a/troposphere/cloudwatch.py\n+++ b/troposphere/cloudwatch.py\n@@ -4,7 +4,7 @@\n # See LICENSE file for full license.\n \n from . import AWSObject, AWSProperty, Ref\n-from .validators import integer, positive_integer, boolean\n+from .validators import positive_integer, boolean\n \n \n class MetricDimension(AWSProperty):\n@@ -31,6 +31,6 @@\n 'OKActions': ([basestring, Ref], False),\n 'Period': (positive_integer, True),\n 'Statistic': (basestring, True),\n- 'Threshold': (integer, True),\n+ 'Threshold': (basestring, True),\n 'Unit': (basestring, False),\n }\n", "issue": "Cloudwatch Alarm Threshold Type\nThe parameter type 'Threshold' within Cloudwatch Alarms is currently of type 'integer' whereas the AWS documentations notes this should be a String.\n\nhttp://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-threshold\n\nI am hitting an issue when using alarms to check instance health - to monitor StatusCheckFailed I have an implementation which sets Threshold to 0.5 to evaluate a healthcheck of sorts. This works in Cloudformation but fails when I try to use it in the troposphere code.\n\nI think the line 'Threshold': (integer, True), should be 'Threshold': (basestring, True), within cloudwatch.py\n\nAny thoughts?\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Ref\nfrom .validators import integer, positive_integer, boolean\n\n\nclass MetricDimension(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Alarm(AWSObject):\n resource_type = \"AWS::CloudWatch::Alarm\"\n\n props = {\n 'ActionsEnabled': (boolean, False),\n 'AlarmActions': ([basestring, Ref], False),\n 'AlarmDescription': (basestring, False),\n 'AlarmName': (basestring, False),\n 'ComparisonOperator': (basestring, True),\n 'Dimensions': ([MetricDimension], False),\n 'EvaluationPeriods': (positive_integer, True),\n 'InsufficientDataActions': ([basestring, Ref], False),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'OKActions': ([basestring, Ref], False),\n 'Period': (positive_integer, True),\n 'Statistic': (basestring, True),\n 'Threshold': (integer, True),\n 'Unit': (basestring, False),\n }\n", "path": "troposphere/cloudwatch.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty, Ref\nfrom .validators import positive_integer, boolean\n\n\nclass MetricDimension(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Alarm(AWSObject):\n resource_type = \"AWS::CloudWatch::Alarm\"\n\n props = {\n 'ActionsEnabled': (boolean, False),\n 'AlarmActions': ([basestring, Ref], False),\n 'AlarmDescription': (basestring, False),\n 'AlarmName': (basestring, False),\n 'ComparisonOperator': (basestring, True),\n 'Dimensions': ([MetricDimension], False),\n 'EvaluationPeriods': (positive_integer, True),\n 'InsufficientDataActions': ([basestring, Ref], False),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'OKActions': ([basestring, Ref], False),\n 'Period': (positive_integer, True),\n 'Statistic': (basestring, True),\n 'Threshold': (basestring, True),\n 'Unit': (basestring, False),\n }\n", "path": "troposphere/cloudwatch.py"}]}
| 763 | 173 |
gh_patches_debug_45018 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1322 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
External Auth dashboard view permission denied bug
```
Internal Server Error: /dashboard/auth/sso/
Traceback (most recent call last):
File "/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 204, in get_response
response = middleware_method(request, response)
File "/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/contrib/sessions/middleware.py", line 30, in process_response
patch_vary_headers(response, ('Cookie',))
File "/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/utils/cache.py", line 148, in patch_vary_headers
if response.has_header('Vary'):
AttributeError: type object 'PermissionDenied' has no attribute 'has_header'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/sso/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2
3 import logging
4
5 from django.contrib.auth.decorators import login_required
6 from django.http import HttpResponse, JsonResponse
7 from django.shortcuts import render
8 from django.views.decorators.debug import sensitive_post_parameters
9 from django.views.generic import View, FormView
10 from django.utils import timezone
11 from django.utils.decorators import method_decorator
12
13 from oauthlib.oauth2 import Server
14
15 from braces.views import LoginRequiredMixin, CsrfExemptMixin
16
17 from oauth2_provider.settings import oauth2_settings
18 from oauth2_provider.backends import OAuth2Backend
19 from oauth2_provider.decorators import protected_resource
20 from oauth2_provider.oauth2_validators import OAuth2Validator
21 from oauth2_provider.exceptions import OAuthToolkitError
22 from oauth2_provider.forms import AllowForm
23 from oauth2_provider.http import HttpResponseUriRedirect
24 from oauth2_provider.models import get_application_model
25 from oauth2_provider.views.mixins import OAuthLibMixin
26 from oauth2_provider.models import AccessToken
27
28 from apps.authentication.models import FIELD_OF_STUDY_CHOICES
29 from apps.sso.models import Client
30
31 log = logging.getLogger('SSO')
32
33
34 @login_required
35 def index(request):
36 """
37 This is the main SSO view
38 """
39
40 context = {}
41
42 return render(request, 'sso/index.html', context)
43
44
45 class BaseAuthorizationView(LoginRequiredMixin, OAuthLibMixin, View):
46 """
47 Implements a generic endpoint to handle *Authorization Requests* as in :rfc:`4.1.1`. The view
48 does not implement any strategy to determine *authorize/do not authorize* logic.
49 The endpoint is used in the following flows:
50 * Authorization code
51 * Implicit grant
52 """
53
54 def dispatch(self, request, *args, **kwargs):
55 self.oauth2_data = {}
56 return super(BaseAuthorizationView, self).dispatch(request, *args, **kwargs)
57
58 def error_response(self, error, **kwargs):
59 """
60 Handle errors either by redirecting to redirect_uri with a json in the body containing
61 error details or providing an error response
62 """
63 redirect, error_response = super(BaseAuthorizationView, self).error_response(error, **kwargs)
64
65 if redirect:
66 return HttpResponseUriRedirect(error_response['url'])
67
68 status = error_response['error'].status_code
69 return self.render_to_response(error_response, status=status)
70
71
72 class AuthorizationView(BaseAuthorizationView, FormView):
73 """
74 Implements and endpoint to handle *Authorization Requests* as in :rfc:`4.1.1` and prompting the
75 user with a form to determine if she authorizes the client application to access her data.
76 This endpoint is reached two times during the authorization process:
77 * first receive a ``GET`` request from user asking authorization for a certain client
78 application, a form is served possibly showing some useful info and prompting for
79 *authorize/do not authorize*.
80 * then receive a ``POST`` request possibly after user authorized the access
81 Some informations contained in the ``GET`` request and needed to create a Grant token during
82 the ``POST`` request would be lost between the two steps above, so they are temporary stored in
83 hidden fields on the form.
84 A possible alternative could be keeping such informations in the session.
85 The endpoint is used in the followin flows:
86 * Authorization code
87 * Implicit grant
88 """
89 template_name = 'sso/authorize.html'
90 form_class = AllowForm
91
92 server_class = Server
93 validator_class = OAuth2Validator
94 oauthlib_backend_class = OAuth2Backend
95
96 skip_authorization_completely = False
97
98 def get_initial(self):
99 scopes = self.oauth2_data.get('scope', self.oauth2_data.get('scopes', []))
100 initial_data = {
101 'redirect_uri': self.oauth2_data.get('redirect_uri', None),
102 'scope': ' '.join(scopes),
103 'client_id': self.oauth2_data.get('client_id', None),
104 'state': self.oauth2_data.get('state', None),
105 'response_type': self.oauth2_data.get('response_type', None),
106 }
107 return initial_data
108
109 def form_valid(self, form):
110 try:
111 credentials = {
112 'client_id': form.cleaned_data.get('client_id'),
113 'redirect_uri': form.cleaned_data.get('redirect_uri'),
114 'response_type': form.cleaned_data.get('response_type', None),
115 'state': form.cleaned_data.get('state', None),
116 }
117
118 scopes = Client.objects.get(client_id=credentials['client_id']).scopes
119 if not scopes:
120 scopes = 'null'
121 allow = form.cleaned_data.get('allow')
122 uri, headers, body, status = self.create_authorization_response(
123 request=self.request, scopes=scopes, credentials=credentials, allow=allow)
124 self.success_url = uri
125 log.debug("Success url for the request: {0}".format(self.success_url))
126 return HttpResponseUriRedirect(self.success_url)
127
128 except OAuthToolkitError as error:
129 return self.error_response(error)
130
131 def get(self, request, *args, **kwargs):
132 try:
133 scopes, credentials = self.validate_authorization_request(request)
134 scopes = Client.objects.get(client_id=credentials['client_id']).get_scopes()
135 if not scopes:
136 scopes = ['null']
137 kwargs['scopes_descriptions'] = [oauth2_settings.SCOPES[scope] for scope in scopes]
138 kwargs['scopes'] = scopes
139 # at this point we know an Application instance with such client_id exists in the database
140 application = get_application_model().objects.get(client_id=credentials['client_id']) # TODO: cache it!
141 kwargs['application'] = application
142 kwargs.update(credentials)
143 self.oauth2_data = kwargs
144 # following two loc are here only because of https://code.djangoproject.com/ticket/17795
145 form = self.get_form(self.get_form_class())
146 kwargs['form'] = form
147
148 # Check to see if the user has already granted access and return
149 # a successful response depending on 'approval_prompt' url parameter
150 require_approval = request.GET.get('approval_prompt', oauth2_settings.REQUEST_APPROVAL_PROMPT)
151
152 # If skip_authorization field is True, skip the authorization screen even
153 # if this is the first use of the application and there was no previous authorization.
154 # This is useful for in-house applications-> assume an in-house applications
155 # are already approved.
156 if application.skip_authorization:
157 uri, headers, body, status = self.create_authorization_response(
158 request=self.request, scopes=" ".join(scopes),
159 credentials=credentials, allow=True)
160 return HttpResponseUriRedirect(uri)
161
162 elif require_approval == 'auto':
163 tokens = request.user.accesstoken_set.filter(application=kwargs['application'],
164 expires__gt=timezone.now()).all()
165 # check past authorizations regarded the same scopes as the current one
166 for token in tokens:
167 if token.allow_scopes(scopes):
168 uri, headers, body, status = self.create_authorization_response(
169 request=self.request, scopes=" ".join(scopes),
170 credentials=credentials, allow=True)
171 return HttpResponseUriRedirect(uri)
172
173 return self.render_to_response(self.get_context_data(**kwargs))
174
175 except OAuthToolkitError as error:
176 return self.error_response(error)
177
178
179 class TokenView(CsrfExemptMixin, OAuthLibMixin, View):
180 """
181 Implements an endpoint to provide access tokens
182 The endpoint is used in the following flows:
183 * Authorization code
184 * Password
185 * Client credentials
186 """
187 server_class = Server
188 validator_class = OAuth2Validator
189 oauthlib_backend_class = OAuth2Backend
190
191 @method_decorator(sensitive_post_parameters('password'))
192 def post(self, request, *args, **kwargs):
193 url, headers, body, status = self.create_token_response(request)
194 response = HttpResponse(content=body, status=status)
195
196 for k, v in headers.items():
197 response[k] = v
198 return response
199
200
201 class RevokeTokenView(CsrfExemptMixin, OAuthLibMixin, View):
202 """
203 Implements an endpoint to revoke access or refresh tokens
204 """
205 server_class = Server
206 validator_class = OAuth2Validator
207 oauthlib_backend_class = OAuth2Backend
208
209 def post(self, request, *args, **kwargs):
210 url, headers, body, status = self.create_revocation_response(request)
211 response = HttpResponse(content=body or '', status=status)
212
213 for k, v in headers.items():
214 response[k] = v
215 return response
216
```
Path: `apps/sso/dashboard/views.py`
Content:
```
1 # -*- coding: utf8 -*-
2 #
3 # Created by 'myth' on 6/25/15
4
5 import logging
6
7 from django.core.urlresolvers import reverse
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required
10 from django.core.exceptions import PermissionDenied
11 from django.shortcuts import render, get_object_or_404, redirect
12
13 from oauth2_provider.settings import oauth2_settings
14
15 from apps.dashboard.tools import get_base_context
16 from apps.sso.models import Client
17 from apps.sso.dashboard.forms import NewClientForm
18
19 log = logging.getLogger('SSO')
20
21
22 @login_required()
23 def index(request):
24 """
25 Main viewcontroller of the Dashboard SSO module
26 :param request: Django request object
27 :return: An HttpResponse
28 """
29
30 # Force only the almighty dotKom to access this view
31 if not request.user.is_superuser:
32 return PermissionDenied
33
34 context = get_base_context(request)
35
36 # Fetch all clients sorted by name
37 context['apps'] = Client.objects.all().order_by('name')
38
39 # Add all available scopes from settings dict (sorted)
40 context['available_scopes'] = sorted((k, v) for k, v in oauth2_settings.user_settings['SCOPES'].items())
41
42 return render(request, 'sso/dashboard/index.html', context)
43
44
45 @login_required()
46 def new_app(request):
47 """
48 Viewcontroller for the creation of new apps that can access user information
49 :param request: Django request object
50 :return: An HttpResponse
51 """
52
53 # Force only the almighty dotKom to access this view
54 if not request.user.is_superuser:
55 return PermissionDenied
56
57 context = get_base_context(request)
58
59 if request.method == 'POST':
60 # Bind the request data to the model form
61 client_form = NewClientForm(request.POST)
62
63 if not client_form.is_valid():
64 messages.error(request, u'Noen av de påkrevde feltene inneholder feil.')
65 else:
66 # We save but not commit to get a Client instance
67 client = client_form.save(commit=False)
68
69 # Add the user that performed the request as singlehandedly responsible for all shenanigans
70 # related to this application :)
71 client.user = request.user
72
73 # We definetly want to keep this off as default for now. Can change it in the regular admin
74 # panel for later, as this will automatically approve the OAuth authorization request.
75 # It is intended for In-house applications that are pre-approved.
76 client.skip_authorization = False
77
78 client.save()
79 messages.success(request, u'App-klienten ble opprettet')
80 return redirect(reverse('sso:app_details', kwargs={'app_pk': client.id}))
81
82 context['form'] = client_form
83
84 else:
85 context['form'] = NewClientForm()
86
87 return render(request, 'sso/dashboard/new_app.html', context)
88
89
90 @login_required()
91 def app_details(request, app_pk):
92 """
93 Viewcontroller for detailed view of a specific registered app
94 :param request: Django request object
95 :return: An HttpResponse
96 """
97
98 # Force only the almighty dotKom to access this view
99 if not request.user.is_superuser:
100 return PermissionDenied
101
102 context = get_base_context(request)
103
104 client = get_object_or_404(Client, pk=app_pk)
105 context['app'] = client
106
107 # If we have some action to perform
108 if request.method == 'POST':
109 if 'action' in request.POST:
110 if request.POST['action'] == 'delete':
111 context['app'].delete()
112 messages.success(request, u'App-klienten ble slettet')
113 return redirect(reverse('sso:index'))
114
115 # Add the registered scopes for the client to the context as a list of scope:description tuples
116 scopes_available = oauth2_settings.user_settings['SCOPES']
117 scopes = [(s, scopes_available[s]) for s in client.get_scopes()]
118 context['scopes'] = scopes
119
120 return render(request, 'sso/dashboard/app_details.html', context)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/sso/dashboard/views.py b/apps/sso/dashboard/views.py
--- a/apps/sso/dashboard/views.py
+++ b/apps/sso/dashboard/views.py
@@ -16,8 +16,6 @@
from apps.sso.models import Client
from apps.sso.dashboard.forms import NewClientForm
-log = logging.getLogger('SSO')
-
@login_required()
def index(request):
@@ -29,7 +27,7 @@
# Force only the almighty dotKom to access this view
if not request.user.is_superuser:
- return PermissionDenied
+ raise PermissionDenied
context = get_base_context(request)
@@ -50,9 +48,11 @@
:return: An HttpResponse
"""
+ _log = logging.getLogger('%s.%s' % (__name__, new_app.__name__))
+
# Force only the almighty dotKom to access this view
if not request.user.is_superuser:
- return PermissionDenied
+ raise PermissionDenied
context = get_base_context(request)
@@ -76,6 +76,8 @@
client.skip_authorization = False
client.save()
+
+ _log.info(u'%s created external auth client %s (%d)' % (request.user, client.name, client.id))
messages.success(request, u'App-klienten ble opprettet')
return redirect(reverse('sso:app_details', kwargs={'app_pk': client.id}))
@@ -95,9 +97,11 @@
:return: An HttpResponse
"""
+ _log = logging.getLogger('%s.%s' % (__name__, app_details.__name__))
+
# Force only the almighty dotKom to access this view
if not request.user.is_superuser:
- return PermissionDenied
+ raise PermissionDenied
context = get_base_context(request)
@@ -108,7 +112,10 @@
if request.method == 'POST':
if 'action' in request.POST:
if request.POST['action'] == 'delete':
+ app_id = context['app'].id
+ app_name = context['app'].name
context['app'].delete()
+ _log.info(u'%s deleted external auth client %s (%d)' % (request.user, app_name, app_id))
messages.success(request, u'App-klienten ble slettet')
return redirect(reverse('sso:index'))
diff --git a/apps/sso/views.py b/apps/sso/views.py
--- a/apps/sso/views.py
+++ b/apps/sso/views.py
@@ -3,7 +3,7 @@
import logging
from django.contrib.auth.decorators import login_required
-from django.http import HttpResponse, JsonResponse
+from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import View, FormView
@@ -16,19 +16,16 @@
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.backends import OAuth2Backend
-from oauth2_provider.decorators import protected_resource
from oauth2_provider.oauth2_validators import OAuth2Validator
from oauth2_provider.exceptions import OAuthToolkitError
from oauth2_provider.forms import AllowForm
from oauth2_provider.http import HttpResponseUriRedirect
from oauth2_provider.models import get_application_model
from oauth2_provider.views.mixins import OAuthLibMixin
-from oauth2_provider.models import AccessToken
-from apps.authentication.models import FIELD_OF_STUDY_CHOICES
from apps.sso.models import Client
-log = logging.getLogger('SSO')
+_log = logging.getLogger('SSO')
@login_required
@@ -122,7 +119,7 @@
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=scopes, credentials=credentials, allow=allow)
self.success_url = uri
- log.debug("Success url for the request: {0}".format(self.success_url))
+ _log.debug("Success url for the request: {0}".format(self.success_url))
return HttpResponseUriRedirect(self.success_url)
except OAuthToolkitError as error:
|
{"golden_diff": "diff --git a/apps/sso/dashboard/views.py b/apps/sso/dashboard/views.py\n--- a/apps/sso/dashboard/views.py\n+++ b/apps/sso/dashboard/views.py\n@@ -16,8 +16,6 @@\n from apps.sso.models import Client\n from apps.sso.dashboard.forms import NewClientForm\n \n-log = logging.getLogger('SSO')\n-\n \n @login_required()\n def index(request):\n@@ -29,7 +27,7 @@\n \n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n- return PermissionDenied\n+ raise PermissionDenied\n \n context = get_base_context(request)\n \n@@ -50,9 +48,11 @@\n :return: An HttpResponse\n \"\"\"\n \n+ _log = logging.getLogger('%s.%s' % (__name__, new_app.__name__))\n+\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n- return PermissionDenied\n+ raise PermissionDenied\n \n context = get_base_context(request)\n \n@@ -76,6 +76,8 @@\n client.skip_authorization = False\n \n client.save()\n+\n+ _log.info(u'%s created external auth client %s (%d)' % (request.user, client.name, client.id))\n messages.success(request, u'App-klienten ble opprettet')\n return redirect(reverse('sso:app_details', kwargs={'app_pk': client.id}))\n \n@@ -95,9 +97,11 @@\n :return: An HttpResponse\n \"\"\"\n \n+ _log = logging.getLogger('%s.%s' % (__name__, app_details.__name__))\n+\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n- return PermissionDenied\n+ raise PermissionDenied\n \n context = get_base_context(request)\n \n@@ -108,7 +112,10 @@\n if request.method == 'POST':\n if 'action' in request.POST:\n if request.POST['action'] == 'delete':\n+ app_id = context['app'].id\n+ app_name = context['app'].name\n context['app'].delete()\n+ _log.info(u'%s deleted external auth client %s (%d)' % (request.user, app_name, app_id))\n messages.success(request, u'App-klienten ble slettet')\n return redirect(reverse('sso:index'))\n \ndiff --git a/apps/sso/views.py b/apps/sso/views.py\n--- a/apps/sso/views.py\n+++ b/apps/sso/views.py\n@@ -3,7 +3,7 @@\n import logging\n \n from django.contrib.auth.decorators import login_required\n-from django.http import HttpResponse, JsonResponse\n+from django.http import HttpResponse\n from django.shortcuts import render\n from django.views.decorators.debug import sensitive_post_parameters\n from django.views.generic import View, FormView\n@@ -16,19 +16,16 @@\n \n from oauth2_provider.settings import oauth2_settings\n from oauth2_provider.backends import OAuth2Backend\n-from oauth2_provider.decorators import protected_resource\n from oauth2_provider.oauth2_validators import OAuth2Validator\n from oauth2_provider.exceptions import OAuthToolkitError\n from oauth2_provider.forms import AllowForm\n from oauth2_provider.http import HttpResponseUriRedirect\n from oauth2_provider.models import get_application_model\n from oauth2_provider.views.mixins import OAuthLibMixin\n-from oauth2_provider.models import AccessToken\n \n-from apps.authentication.models import FIELD_OF_STUDY_CHOICES\n from apps.sso.models import Client\n \n-log = logging.getLogger('SSO')\n+_log = logging.getLogger('SSO')\n \n \n @login_required\n@@ -122,7 +119,7 @@\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=scopes, credentials=credentials, allow=allow)\n self.success_url = uri\n- log.debug(\"Success url for the request: {0}\".format(self.success_url))\n+ _log.debug(\"Success url for the request: {0}\".format(self.success_url))\n return HttpResponseUriRedirect(self.success_url)\n \n 
except OAuthToolkitError as error:\n", "issue": "External Auth dashboard view permission denied bug\n```\nInternal Server Error: /dashboard/auth/sso/\nTraceback (most recent call last):\n File \"/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/core/handlers/base.py\", line 204, in get_response\n response = middleware_method(request, response)\n File \"/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/contrib/sessions/middleware.py\", line 30, in process_response\n patch_vary_headers(response, ('Cookie',))\n File \"/srv/www/onlineweb4/env/local/lib/python2.7/site-packages/django/utils/cache.py\", line 148, in patch_vary_headers\n if response.has_header('Vary'):\nAttributeError: type object 'PermissionDenied' has no attribute 'has_header'\n```\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.generic import View, FormView\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\n\nfrom oauthlib.oauth2 import Server\n\nfrom braces.views import LoginRequiredMixin, CsrfExemptMixin\n\nfrom oauth2_provider.settings import oauth2_settings\nfrom oauth2_provider.backends import OAuth2Backend\nfrom oauth2_provider.decorators import protected_resource\nfrom oauth2_provider.oauth2_validators import OAuth2Validator\nfrom oauth2_provider.exceptions import OAuthToolkitError\nfrom oauth2_provider.forms import AllowForm\nfrom oauth2_provider.http import HttpResponseUriRedirect\nfrom oauth2_provider.models import get_application_model\nfrom oauth2_provider.views.mixins import OAuthLibMixin\nfrom oauth2_provider.models import AccessToken\n\nfrom apps.authentication.models import FIELD_OF_STUDY_CHOICES\nfrom apps.sso.models import Client\n\nlog = logging.getLogger('SSO')\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main SSO view\n \"\"\"\n\n context = {}\n\n return render(request, 'sso/index.html', context)\n\n\nclass BaseAuthorizationView(LoginRequiredMixin, OAuthLibMixin, View):\n \"\"\"\n Implements a generic endpoint to handle *Authorization Requests* as in :rfc:`4.1.1`. 
The view\n does not implement any strategy to determine *authorize/do not authorize* logic.\n The endpoint is used in the following flows:\n * Authorization code\n * Implicit grant\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.oauth2_data = {}\n return super(BaseAuthorizationView, self).dispatch(request, *args, **kwargs)\n\n def error_response(self, error, **kwargs):\n \"\"\"\n Handle errors either by redirecting to redirect_uri with a json in the body containing\n error details or providing an error response\n \"\"\"\n redirect, error_response = super(BaseAuthorizationView, self).error_response(error, **kwargs)\n\n if redirect:\n return HttpResponseUriRedirect(error_response['url'])\n\n status = error_response['error'].status_code\n return self.render_to_response(error_response, status=status)\n\n\nclass AuthorizationView(BaseAuthorizationView, FormView):\n \"\"\"\n Implements and endpoint to handle *Authorization Requests* as in :rfc:`4.1.1` and prompting the\n user with a form to determine if she authorizes the client application to access her data.\n This endpoint is reached two times during the authorization process:\n * first receive a ``GET`` request from user asking authorization for a certain client\n application, a form is served possibly showing some useful info and prompting for\n *authorize/do not authorize*.\n * then receive a ``POST`` request possibly after user authorized the access\n Some informations contained in the ``GET`` request and needed to create a Grant token during\n the ``POST`` request would be lost between the two steps above, so they are temporary stored in\n hidden fields on the form.\n A possible alternative could be keeping such informations in the session.\n The endpoint is used in the followin flows:\n * Authorization code\n * Implicit grant\n \"\"\"\n template_name = 'sso/authorize.html'\n form_class = AllowForm\n\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n skip_authorization_completely = False\n\n def get_initial(self):\n scopes = self.oauth2_data.get('scope', self.oauth2_data.get('scopes', []))\n initial_data = {\n 'redirect_uri': self.oauth2_data.get('redirect_uri', None),\n 'scope': ' '.join(scopes),\n 'client_id': self.oauth2_data.get('client_id', None),\n 'state': self.oauth2_data.get('state', None),\n 'response_type': self.oauth2_data.get('response_type', None),\n }\n return initial_data\n\n def form_valid(self, form):\n try:\n credentials = {\n 'client_id': form.cleaned_data.get('client_id'),\n 'redirect_uri': form.cleaned_data.get('redirect_uri'),\n 'response_type': form.cleaned_data.get('response_type', None),\n 'state': form.cleaned_data.get('state', None),\n }\n\n scopes = Client.objects.get(client_id=credentials['client_id']).scopes\n if not scopes:\n scopes = 'null'\n allow = form.cleaned_data.get('allow')\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=scopes, credentials=credentials, allow=allow)\n self.success_url = uri\n log.debug(\"Success url for the request: {0}\".format(self.success_url))\n return HttpResponseUriRedirect(self.success_url)\n\n except OAuthToolkitError as error:\n return self.error_response(error)\n\n def get(self, request, *args, **kwargs):\n try:\n scopes, credentials = self.validate_authorization_request(request)\n scopes = Client.objects.get(client_id=credentials['client_id']).get_scopes()\n if not scopes:\n scopes = ['null']\n kwargs['scopes_descriptions'] = 
[oauth2_settings.SCOPES[scope] for scope in scopes]\n kwargs['scopes'] = scopes\n # at this point we know an Application instance with such client_id exists in the database\n application = get_application_model().objects.get(client_id=credentials['client_id']) # TODO: cache it!\n kwargs['application'] = application\n kwargs.update(credentials)\n self.oauth2_data = kwargs\n # following two loc are here only because of https://code.djangoproject.com/ticket/17795\n form = self.get_form(self.get_form_class())\n kwargs['form'] = form\n\n # Check to see if the user has already granted access and return\n # a successful response depending on 'approval_prompt' url parameter\n require_approval = request.GET.get('approval_prompt', oauth2_settings.REQUEST_APPROVAL_PROMPT)\n\n # If skip_authorization field is True, skip the authorization screen even\n # if this is the first use of the application and there was no previous authorization.\n # This is useful for in-house applications-> assume an in-house applications\n # are already approved.\n if application.skip_authorization:\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=\" \".join(scopes),\n credentials=credentials, allow=True)\n return HttpResponseUriRedirect(uri)\n\n elif require_approval == 'auto':\n tokens = request.user.accesstoken_set.filter(application=kwargs['application'],\n expires__gt=timezone.now()).all()\n # check past authorizations regarded the same scopes as the current one\n for token in tokens:\n if token.allow_scopes(scopes):\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=\" \".join(scopes),\n credentials=credentials, allow=True)\n return HttpResponseUriRedirect(uri)\n\n return self.render_to_response(self.get_context_data(**kwargs))\n\n except OAuthToolkitError as error:\n return self.error_response(error)\n\n\nclass TokenView(CsrfExemptMixin, OAuthLibMixin, View):\n \"\"\"\n Implements an endpoint to provide access tokens\n The endpoint is used in the following flows:\n * Authorization code\n * Password\n * Client credentials\n \"\"\"\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n @method_decorator(sensitive_post_parameters('password'))\n def post(self, request, *args, **kwargs):\n url, headers, body, status = self.create_token_response(request)\n response = HttpResponse(content=body, status=status)\n\n for k, v in headers.items():\n response[k] = v\n return response\n\n\nclass RevokeTokenView(CsrfExemptMixin, OAuthLibMixin, View):\n \"\"\"\n Implements an endpoint to revoke access or refresh tokens\n \"\"\"\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n def post(self, request, *args, **kwargs):\n url, headers, body, status = self.create_revocation_response(request)\n response = HttpResponse(content=body or '', status=status)\n\n for k, v in headers.items():\n response[k] = v\n return response\n", "path": "apps/sso/views.py"}, {"content": "# -*- coding: utf8 -*-\n#\n# Created by 'myth' on 6/25/15\n\nimport logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom oauth2_provider.settings import oauth2_settings\n\nfrom apps.dashboard.tools import get_base_context\nfrom apps.sso.models import 
Client\nfrom apps.sso.dashboard.forms import NewClientForm\n\nlog = logging.getLogger('SSO')\n\n\n@login_required()\ndef index(request):\n \"\"\"\n Main viewcontroller of the Dashboard SSO module\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n return PermissionDenied\n\n context = get_base_context(request)\n\n # Fetch all clients sorted by name\n context['apps'] = Client.objects.all().order_by('name')\n\n # Add all available scopes from settings dict (sorted)\n context['available_scopes'] = sorted((k, v) for k, v in oauth2_settings.user_settings['SCOPES'].items())\n\n return render(request, 'sso/dashboard/index.html', context)\n\n\n@login_required()\ndef new_app(request):\n \"\"\"\n Viewcontroller for the creation of new apps that can access user information\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n return PermissionDenied\n\n context = get_base_context(request)\n\n if request.method == 'POST':\n # Bind the request data to the model form\n client_form = NewClientForm(request.POST)\n\n if not client_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n # We save but not commit to get a Client instance\n client = client_form.save(commit=False)\n\n # Add the user that performed the request as singlehandedly responsible for all shenanigans\n # related to this application :)\n client.user = request.user\n\n # We definetly want to keep this off as default for now. Can change it in the regular admin\n # panel for later, as this will automatically approve the OAuth authorization request.\n # It is intended for In-house applications that are pre-approved.\n client.skip_authorization = False\n\n client.save()\n messages.success(request, u'App-klienten ble opprettet')\n return redirect(reverse('sso:app_details', kwargs={'app_pk': client.id}))\n\n context['form'] = client_form\n\n else:\n context['form'] = NewClientForm()\n\n return render(request, 'sso/dashboard/new_app.html', context)\n\n\n@login_required()\ndef app_details(request, app_pk):\n \"\"\"\n Viewcontroller for detailed view of a specific registered app\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n return PermissionDenied\n\n context = get_base_context(request)\n\n client = get_object_or_404(Client, pk=app_pk)\n context['app'] = client\n\n # If we have some action to perform\n if request.method == 'POST':\n if 'action' in request.POST:\n if request.POST['action'] == 'delete':\n context['app'].delete()\n messages.success(request, u'App-klienten ble slettet')\n return redirect(reverse('sso:index'))\n\n # Add the registered scopes for the client to the context as a list of scope:description tuples\n scopes_available = oauth2_settings.user_settings['SCOPES']\n scopes = [(s, scopes_available[s]) for s in client.get_scopes()]\n context['scopes'] = scopes\n\n return render(request, 'sso/dashboard/app_details.html', context)\n", "path": "apps/sso/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.debug import 
sensitive_post_parameters\nfrom django.views.generic import View, FormView\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\n\nfrom oauthlib.oauth2 import Server\n\nfrom braces.views import LoginRequiredMixin, CsrfExemptMixin\n\nfrom oauth2_provider.settings import oauth2_settings\nfrom oauth2_provider.backends import OAuth2Backend\nfrom oauth2_provider.oauth2_validators import OAuth2Validator\nfrom oauth2_provider.exceptions import OAuthToolkitError\nfrom oauth2_provider.forms import AllowForm\nfrom oauth2_provider.http import HttpResponseUriRedirect\nfrom oauth2_provider.models import get_application_model\nfrom oauth2_provider.views.mixins import OAuthLibMixin\n\nfrom apps.sso.models import Client\n\n_log = logging.getLogger('SSO')\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main SSO view\n \"\"\"\n\n context = {}\n\n return render(request, 'sso/index.html', context)\n\n\nclass BaseAuthorizationView(LoginRequiredMixin, OAuthLibMixin, View):\n \"\"\"\n Implements a generic endpoint to handle *Authorization Requests* as in :rfc:`4.1.1`. The view\n does not implement any strategy to determine *authorize/do not authorize* logic.\n The endpoint is used in the following flows:\n * Authorization code\n * Implicit grant\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.oauth2_data = {}\n return super(BaseAuthorizationView, self).dispatch(request, *args, **kwargs)\n\n def error_response(self, error, **kwargs):\n \"\"\"\n Handle errors either by redirecting to redirect_uri with a json in the body containing\n error details or providing an error response\n \"\"\"\n redirect, error_response = super(BaseAuthorizationView, self).error_response(error, **kwargs)\n\n if redirect:\n return HttpResponseUriRedirect(error_response['url'])\n\n status = error_response['error'].status_code\n return self.render_to_response(error_response, status=status)\n\n\nclass AuthorizationView(BaseAuthorizationView, FormView):\n \"\"\"\n Implements and endpoint to handle *Authorization Requests* as in :rfc:`4.1.1` and prompting the\n user with a form to determine if she authorizes the client application to access her data.\n This endpoint is reached two times during the authorization process:\n * first receive a ``GET`` request from user asking authorization for a certain client\n application, a form is served possibly showing some useful info and prompting for\n *authorize/do not authorize*.\n * then receive a ``POST`` request possibly after user authorized the access\n Some informations contained in the ``GET`` request and needed to create a Grant token during\n the ``POST`` request would be lost between the two steps above, so they are temporary stored in\n hidden fields on the form.\n A possible alternative could be keeping such informations in the session.\n The endpoint is used in the followin flows:\n * Authorization code\n * Implicit grant\n \"\"\"\n template_name = 'sso/authorize.html'\n form_class = AllowForm\n\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n skip_authorization_completely = False\n\n def get_initial(self):\n scopes = self.oauth2_data.get('scope', self.oauth2_data.get('scopes', []))\n initial_data = {\n 'redirect_uri': self.oauth2_data.get('redirect_uri', None),\n 'scope': ' '.join(scopes),\n 'client_id': self.oauth2_data.get('client_id', None),\n 'state': self.oauth2_data.get('state', None),\n 'response_type': self.oauth2_data.get('response_type', None),\n }\n 
return initial_data\n\n def form_valid(self, form):\n try:\n credentials = {\n 'client_id': form.cleaned_data.get('client_id'),\n 'redirect_uri': form.cleaned_data.get('redirect_uri'),\n 'response_type': form.cleaned_data.get('response_type', None),\n 'state': form.cleaned_data.get('state', None),\n }\n\n scopes = Client.objects.get(client_id=credentials['client_id']).scopes\n if not scopes:\n scopes = 'null'\n allow = form.cleaned_data.get('allow')\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=scopes, credentials=credentials, allow=allow)\n self.success_url = uri\n _log.debug(\"Success url for the request: {0}\".format(self.success_url))\n return HttpResponseUriRedirect(self.success_url)\n\n except OAuthToolkitError as error:\n return self.error_response(error)\n\n def get(self, request, *args, **kwargs):\n try:\n scopes, credentials = self.validate_authorization_request(request)\n scopes = Client.objects.get(client_id=credentials['client_id']).get_scopes()\n if not scopes:\n scopes = ['null']\n kwargs['scopes_descriptions'] = [oauth2_settings.SCOPES[scope] for scope in scopes]\n kwargs['scopes'] = scopes\n # at this point we know an Application instance with such client_id exists in the database\n application = get_application_model().objects.get(client_id=credentials['client_id']) # TODO: cache it!\n kwargs['application'] = application\n kwargs.update(credentials)\n self.oauth2_data = kwargs\n # following two loc are here only because of https://code.djangoproject.com/ticket/17795\n form = self.get_form(self.get_form_class())\n kwargs['form'] = form\n\n # Check to see if the user has already granted access and return\n # a successful response depending on 'approval_prompt' url parameter\n require_approval = request.GET.get('approval_prompt', oauth2_settings.REQUEST_APPROVAL_PROMPT)\n\n # If skip_authorization field is True, skip the authorization screen even\n # if this is the first use of the application and there was no previous authorization.\n # This is useful for in-house applications-> assume an in-house applications\n # are already approved.\n if application.skip_authorization:\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=\" \".join(scopes),\n credentials=credentials, allow=True)\n return HttpResponseUriRedirect(uri)\n\n elif require_approval == 'auto':\n tokens = request.user.accesstoken_set.filter(application=kwargs['application'],\n expires__gt=timezone.now()).all()\n # check past authorizations regarded the same scopes as the current one\n for token in tokens:\n if token.allow_scopes(scopes):\n uri, headers, body, status = self.create_authorization_response(\n request=self.request, scopes=\" \".join(scopes),\n credentials=credentials, allow=True)\n return HttpResponseUriRedirect(uri)\n\n return self.render_to_response(self.get_context_data(**kwargs))\n\n except OAuthToolkitError as error:\n return self.error_response(error)\n\n\nclass TokenView(CsrfExemptMixin, OAuthLibMixin, View):\n \"\"\"\n Implements an endpoint to provide access tokens\n The endpoint is used in the following flows:\n * Authorization code\n * Password\n * Client credentials\n \"\"\"\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n @method_decorator(sensitive_post_parameters('password'))\n def post(self, request, *args, **kwargs):\n url, headers, body, status = self.create_token_response(request)\n response = HttpResponse(content=body, 
status=status)\n\n for k, v in headers.items():\n response[k] = v\n return response\n\n\nclass RevokeTokenView(CsrfExemptMixin, OAuthLibMixin, View):\n \"\"\"\n Implements an endpoint to revoke access or refresh tokens\n \"\"\"\n server_class = Server\n validator_class = OAuth2Validator\n oauthlib_backend_class = OAuth2Backend\n\n def post(self, request, *args, **kwargs):\n url, headers, body, status = self.create_revocation_response(request)\n response = HttpResponse(content=body or '', status=status)\n\n for k, v in headers.items():\n response[k] = v\n return response\n", "path": "apps/sso/views.py"}, {"content": "# -*- coding: utf8 -*-\n#\n# Created by 'myth' on 6/25/15\n\nimport logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom oauth2_provider.settings import oauth2_settings\n\nfrom apps.dashboard.tools import get_base_context\nfrom apps.sso.models import Client\nfrom apps.sso.dashboard.forms import NewClientForm\n\n\n@login_required()\ndef index(request):\n \"\"\"\n Main viewcontroller of the Dashboard SSO module\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # Fetch all clients sorted by name\n context['apps'] = Client.objects.all().order_by('name')\n\n # Add all available scopes from settings dict (sorted)\n context['available_scopes'] = sorted((k, v) for k, v in oauth2_settings.user_settings['SCOPES'].items())\n\n return render(request, 'sso/dashboard/index.html', context)\n\n\n@login_required()\ndef new_app(request):\n \"\"\"\n Viewcontroller for the creation of new apps that can access user information\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n _log = logging.getLogger('%s.%s' % (__name__, new_app.__name__))\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n raise PermissionDenied\n\n context = get_base_context(request)\n\n if request.method == 'POST':\n # Bind the request data to the model form\n client_form = NewClientForm(request.POST)\n\n if not client_form.is_valid():\n messages.error(request, u'Noen av de p\u00e5krevde feltene inneholder feil.')\n else:\n # We save but not commit to get a Client instance\n client = client_form.save(commit=False)\n\n # Add the user that performed the request as singlehandedly responsible for all shenanigans\n # related to this application :)\n client.user = request.user\n\n # We definetly want to keep this off as default for now. 
Can change it in the regular admin\n # panel for later, as this will automatically approve the OAuth authorization request.\n # It is intended for In-house applications that are pre-approved.\n client.skip_authorization = False\n\n client.save()\n\n _log.info(u'%s created external auth client %s (%d)' % (request.user, client.name, client.id))\n messages.success(request, u'App-klienten ble opprettet')\n return redirect(reverse('sso:app_details', kwargs={'app_pk': client.id}))\n\n context['form'] = client_form\n\n else:\n context['form'] = NewClientForm()\n\n return render(request, 'sso/dashboard/new_app.html', context)\n\n\n@login_required()\ndef app_details(request, app_pk):\n \"\"\"\n Viewcontroller for detailed view of a specific registered app\n :param request: Django request object\n :return: An HttpResponse\n \"\"\"\n\n _log = logging.getLogger('%s.%s' % (__name__, app_details.__name__))\n\n # Force only the almighty dotKom to access this view\n if not request.user.is_superuser:\n raise PermissionDenied\n\n context = get_base_context(request)\n\n client = get_object_or_404(Client, pk=app_pk)\n context['app'] = client\n\n # If we have some action to perform\n if request.method == 'POST':\n if 'action' in request.POST:\n if request.POST['action'] == 'delete':\n app_id = context['app'].id\n app_name = context['app'].name\n context['app'].delete()\n _log.info(u'%s deleted external auth client %s (%d)' % (request.user, app_name, app_id))\n messages.success(request, u'App-klienten ble slettet')\n return redirect(reverse('sso:index'))\n\n # Add the registered scopes for the client to the context as a list of scope:description tuples\n scopes_available = oauth2_settings.user_settings['SCOPES']\n scopes = [(s, scopes_available[s]) for s in client.get_scopes()]\n context['scopes'] = scopes\n\n return render(request, 'sso/dashboard/app_details.html', context)\n", "path": "apps/sso/dashboard/views.py"}]}
| 3,973 | 895 |
gh_patches_debug_32317 | rasdani/github-patches | git_diff | pypa__setuptools-2255 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Debian-specific behavior lost when Setuptools adopts distutils
`pip install -e $PACKAGE_PATH` installs commands to /usr/bin and not /usr/local/bin on Ubuntu as it did in the past.
`pip install $PACKAGE_PATH` continues to install to /usr/local/bin as expected.
Openstack downstream has temporarily capped setuptools until we know if this is expected behaviour or not. Please see http://lists.openstack.org/pipermail/openstack-discuss/2020-July/015779.html for more context.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_distutils_hack/__init__.py`
Content:
```
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 def warn_distutils_present():
12 if 'distutils' not in sys.modules:
13 return
14 if is_pypy and sys.version_info < (3, 7):
15 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
16 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
17 return
18 warnings.warn(
19 "Distutils was imported before Setuptools, but importing Setuptools "
20 "also replaces the `distutils` module in `sys.modules`. This may lead "
21 "to undesirable behaviors or errors. To avoid these issues, avoid "
22 "using distutils directly, ensure that setuptools is installed in the "
23 "traditional way (e.g. not an editable install), and/or make sure that "
24 "setuptools is always imported before distutils.")
25
26
27 def clear_distutils():
28 if 'distutils' not in sys.modules:
29 return
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
32 for name in mods:
33 del sys.modules[name]
34
35
36 def enabled():
37 """
38 Allow selection of distutils by environment variable.
39 """
40 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
41 return which == 'local'
42
43
44 def ensure_local_distutils():
45 clear_distutils()
46 distutils = importlib.import_module('setuptools._distutils')
47 distutils.__name__ = 'distutils'
48 sys.modules['distutils'] = distutils
49
50 # sanity check that submodules load as expected
51 core = importlib.import_module('distutils.core')
52 assert '_distutils' in core.__file__, core.__file__
53
54
55 def do_override():
56 """
57 Ensure that the local copy of distutils is preferred over stdlib.
58
59 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
60 for more motivation.
61 """
62 if enabled():
63 warn_distutils_present()
64 ensure_local_distutils()
65
66
67 class DistutilsMetaFinder:
68 def find_spec(self, fullname, path, target=None):
69 if path is not None or fullname != "distutils":
70 return None
71
72 return self.get_distutils_spec()
73
74 def get_distutils_spec(self):
75 import importlib.util
76
77 class DistutilsLoader(importlib.util.abc.Loader):
78
79 def create_module(self, spec):
80 return importlib.import_module('._distutils', 'setuptools')
81
82 def exec_module(self, module):
83 pass
84
85 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
86
87
88 DISTUTILS_FINDER = DistutilsMetaFinder()
89
90
91 def add_shim():
92 sys.meta_path.insert(0, DISTUTILS_FINDER)
93
94
95 def remove_shim():
96 try:
97 sys.meta_path.remove(DISTUTILS_FINDER)
98 except ValueError:
99 pass
100
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Distutils setup file, used to install or test 'setuptools'
4 """
5
6 import os
7 import sys
8 import textwrap
9
10 import setuptools
11 from setuptools.command.install import install
12
13 here = os.path.dirname(__file__)
14
15
16 def require_metadata():
17 "Prevent improper installs without necessary metadata. See #659"
18 egg_info_dir = os.path.join(here, 'setuptools.egg-info')
19 if not os.path.exists(egg_info_dir):
20 msg = (
21 "Cannot build setuptools without metadata. "
22 "Run `bootstrap.py`."
23 )
24 raise RuntimeError(msg)
25
26
27 def read_commands():
28 command_ns = {}
29 cmd_module_path = 'setuptools/command/__init__.py'
30 init_path = os.path.join(here, cmd_module_path)
31 with open(init_path) as init_file:
32 exec(init_file.read(), command_ns)
33 return command_ns['__all__']
34
35
36 def _gen_console_scripts():
37 yield "easy_install = setuptools.command.easy_install:main"
38
39 # Gentoo distributions manage the python-version-specific scripts
40 # themselves, so those platforms define an environment variable to
41 # suppress the creation of the version-specific scripts.
42 var_names = (
43 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
44 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
45 )
46 if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
47 return
48 tmpl = "easy_install-{shortver} = setuptools.command.easy_install:main"
49 yield tmpl.format(shortver='{}.{}'.format(*sys.version_info))
50
51
52 package_data = dict(
53 setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],
54 )
55
56 force_windows_specific_files = (
57 os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower()
58 not in ("", "0", "false", "no")
59 )
60
61 include_windows_files = (
62 sys.platform == 'win32' or
63 os.name == 'java' and os._name == 'nt' or
64 force_windows_specific_files
65 )
66
67 if include_windows_files:
68 package_data.setdefault('setuptools', []).extend(['*.exe'])
69 package_data.setdefault('setuptools.command', []).extend(['*.xml'])
70
71 needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
72 wheel = ['wheel'] if needs_wheel else []
73
74
75 def pypi_link(pkg_filename):
76 """
77 Given the filename, including md5 fragment, construct the
78 dependency link for PyPI.
79 """
80 root = 'https://files.pythonhosted.org/packages/source'
81 name, sep, rest = pkg_filename.partition('-')
82 parts = root, name[0], name, pkg_filename
83 return '/'.join(parts)
84
85
86 class install_with_pth(install):
87 """
88 Custom install command to install a .pth file for distutils patching.
89
90 This hack is necessary because there's no standard way to install behavior
91 on startup (and it's debatable if there should be one). This hack (ab)uses
92 the `extra_path` behavior in Setuptools to install a `.pth` file with
93 implicit behavior on startup to give higher precedence to the local version
94 of `distutils` over the version from the standard library.
95
96 Please do not replicate this behavior.
97 """
98
99 _pth_name = 'distutils-precedence'
100 _pth_contents = textwrap.dedent("""
101 import os
102 enabled = os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'local'
103 enabled and __import__('_distutils_hack').add_shim()
104 """).lstrip().replace('\n', '; ')
105
106 def initialize_options(self):
107 install.initialize_options(self)
108 self.extra_path = self._pth_name, self._pth_contents
109
110 def finalize_options(self):
111 install.finalize_options(self)
112 self._restore_install_lib()
113
114 def _restore_install_lib(self):
115 """
116 Undo secondary effect of `extra_path` adding to `install_lib`
117 """
118 suffix = os.path.relpath(self.install_lib, self.install_libbase)
119
120 if suffix.strip() == self._pth_contents.strip():
121 self.install_lib = self.install_libbase
122
123
124 setup_params = dict(
125 src_root=None,
126 cmdclass={'install': install_with_pth},
127 package_data=package_data,
128 entry_points={
129 "distutils.commands": [
130 "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
131 for cmd in read_commands()
132 ],
133 "setuptools.finalize_distribution_options": [
134 "parent_finalize = setuptools.dist:_Distribution.finalize_options",
135 "keywords = setuptools.dist:Distribution._finalize_setup_keywords",
136 "2to3_doctests = "
137 "setuptools.dist:Distribution._finalize_2to3_doctests",
138 ],
139 "distutils.setup_keywords": [
140 "eager_resources = setuptools.dist:assert_string_list",
141 "namespace_packages = setuptools.dist:check_nsp",
142 "extras_require = setuptools.dist:check_extras",
143 "install_requires = setuptools.dist:check_requirements",
144 "tests_require = setuptools.dist:check_requirements",
145 "setup_requires = setuptools.dist:check_requirements",
146 "python_requires = setuptools.dist:check_specifier",
147 "entry_points = setuptools.dist:check_entry_points",
148 "test_suite = setuptools.dist:check_test_suite",
149 "zip_safe = setuptools.dist:assert_bool",
150 "package_data = setuptools.dist:check_package_data",
151 "exclude_package_data = setuptools.dist:check_package_data",
152 "include_package_data = setuptools.dist:assert_bool",
153 "packages = setuptools.dist:check_packages",
154 "dependency_links = setuptools.dist:assert_string_list",
155 "test_loader = setuptools.dist:check_importable",
156 "test_runner = setuptools.dist:check_importable",
157 "use_2to3 = setuptools.dist:assert_bool",
158 "convert_2to3_doctests = setuptools.dist:assert_string_list",
159 "use_2to3_fixers = setuptools.dist:assert_string_list",
160 "use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
161 ],
162 "egg_info.writers": [
163 "PKG-INFO = setuptools.command.egg_info:write_pkg_info",
164 "requires.txt = setuptools.command.egg_info:write_requirements",
165 "entry_points.txt = setuptools.command.egg_info:write_entries",
166 "eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
167 (
168 "namespace_packages.txt = "
169 "setuptools.command.egg_info:overwrite_arg"
170 ),
171 "top_level.txt = setuptools.command.egg_info:write_toplevel_names",
172 "depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
173 "dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
174 ],
175 "console_scripts": list(_gen_console_scripts()),
176 "setuptools.installation":
177 ['eggsecutable = setuptools.command.easy_install:bootstrap'],
178 },
179 dependency_links=[
180 pypi_link(
181 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',
182 ),
183 pypi_link(
184 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
185 ),
186 ],
187 setup_requires=[
188 ] + wheel,
189 )
190
191 if __name__ == '__main__':
192 # allow setup.py to run from another directory
193 here and os.chdir(here)
194 require_metadata()
195 dist = setuptools.setup(**setup_params)
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -37,7 +37,7 @@
"""
Allow selection of distutils by environment variable.
"""
- which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
+ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
return which == 'local'
@@ -66,12 +66,14 @@
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
- if path is not None or fullname != "distutils":
- return None
+ if path is not None:
+ return
- return self.get_distutils_spec()
+ method_name = 'spec_for_{fullname}'.format(**locals())
+ method = getattr(self, method_name, lambda: None)
+ return method()
- def get_distutils_spec(self):
+ def spec_for_distutils(self):
import importlib.util
class DistutilsLoader(importlib.util.abc.Loader):
@@ -84,6 +86,14 @@
return importlib.util.spec_from_loader('distutils', DistutilsLoader())
+ def spec_for_pip(self):
+ """
+ Ensure stdlib distutils when running under pip.
+ See pypa/pip#8761 for rationale.
+ """
+ clear_distutils()
+ self.spec_for_distutils = lambda: None
+
DISTUTILS_FINDER = DistutilsMetaFinder()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -99,7 +99,8 @@
_pth_name = 'distutils-precedence'
_pth_contents = textwrap.dedent("""
import os
- enabled = os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'local'
+ var = 'SETUPTOOLS_USE_DISTUTILS'
+ enabled = os.environ.get(var, 'local') == 'local'
enabled and __import__('_distutils_hack').add_shim()
""").lstrip().replace('\n', '; ')
|
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -37,7 +37,7 @@\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n- which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n+ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n \n \n@@ -66,12 +66,14 @@\n \n class DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n- if path is not None or fullname != \"distutils\":\n- return None\n+ if path is not None:\n+ return\n \n- return self.get_distutils_spec()\n+ method_name = 'spec_for_{fullname}'.format(**locals())\n+ method = getattr(self, method_name, lambda: None)\n+ return method()\n \n- def get_distutils_spec(self):\n+ def spec_for_distutils(self):\n import importlib.util\n \n class DistutilsLoader(importlib.util.abc.Loader):\n@@ -84,6 +86,14 @@\n \n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n \n+ def spec_for_pip(self):\n+ \"\"\"\n+ Ensure stdlib distutils when running under pip.\n+ See pypa/pip#8761 for rationale.\n+ \"\"\"\n+ clear_distutils()\n+ self.spec_for_distutils = lambda: None\n+\n \n DISTUTILS_FINDER = DistutilsMetaFinder()\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -99,7 +99,8 @@\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n- enabled = os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'local'\n+ var = 'SETUPTOOLS_USE_DISTUTILS'\n+ enabled = os.environ.get(var, 'local') == 'local'\n enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n", "issue": "Debian-specific behavior lost when Setuptools adopts distutils\n`pip install -e $PACKAGE_PATH` installs commands to /usr/bin and not /usr/local/bin on Ubuntu as it did in the past.\r\n\r\n`pip install $PACKAGE_PATH` continues to install to /usr/local/bin as expected.\r\n\r\nOpenstack downstream has temporarily capped setuptools until we know if this is expected behaviour or not. Please see http://lists.openstack.org/pipermail/openstack-discuss/2020-July/015779.html for more context.\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure that \"\n \"setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None or fullname != \"distutils\":\n return None\n\n return self.get_distutils_spec()\n\n def get_distutils_spec(self):\n import importlib.util\n\n class DistutilsLoader(importlib.util.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('._distutils', 'setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}, {"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or test 'setuptools'\n\"\"\"\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.install import install\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n egg_info_dir = os.path.join(here, 'setuptools.egg-info')\n if not os.path.exists(egg_info_dir):\n msg = (\n \"Cannot build setuptools without metadata. 
\"\n \"Run `bootstrap.py`.\"\n )\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n tmpl = \"easy_install-{shortver} = setuptools.command.easy_install:main\"\n yield tmpl.format(shortver='{}.{}'.format(*sys.version_info))\n\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nclass install_with_pth(install):\n \"\"\"\n Custom install command to install a .pth file for distutils patching.\n\n This hack is necessary because there's no standard way to install behavior\n on startup (and it's debatable if there should be one). 
This hack (ab)uses\n the `extra_path` behavior in Setuptools to install a `.pth` file with\n implicit behavior on startup to give higher precedence to the local version\n of `distutils` over the version from the standard library.\n\n Please do not replicate this behavior.\n \"\"\"\n\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n enabled = os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'local'\n enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n\n def initialize_options(self):\n install.initialize_options(self)\n self.extra_path = self._pth_name, self._pth_contents\n\n def finalize_options(self):\n install.finalize_options(self)\n self._restore_install_lib()\n\n def _restore_install_lib(self):\n \"\"\"\n Undo secondary effect of `extra_path` adding to `install_lib`\n \"\"\"\n suffix = os.path.relpath(self.install_lib, self.install_libbase)\n\n if suffix.strip() == self._pth_contents.strip():\n self.install_lib = self.install_libbase\n\n\nsetup_params = dict(\n src_root=None,\n cmdclass={'install': install_with_pth},\n package_data=package_data,\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in read_commands()\n ],\n \"setuptools.finalize_distribution_options\": [\n \"parent_finalize = setuptools.dist:_Distribution.finalize_options\",\n \"keywords = setuptools.dist:Distribution._finalize_setup_keywords\",\n \"2to3_doctests = \"\n \"setuptools.dist:Distribution._finalize_2to3_doctests\",\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n (\n \"namespace_packages.txt = \"\n \"setuptools.command.egg_info:overwrite_arg\"\n ),\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n \"setuptools.installation\":\n ['eggsecutable = 
setuptools.command.easy_install:bootstrap'],\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',\n ),\n pypi_link(\n 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n setup_requires=[\n ] + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}], "after_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure that \"\n \"setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.util\n\n class DistutilsLoader(importlib.util.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('._distutils', 'setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}, {"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or 
test 'setuptools'\n\"\"\"\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.install import install\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n egg_info_dir = os.path.join(here, 'setuptools.egg-info')\n if not os.path.exists(egg_info_dir):\n msg = (\n \"Cannot build setuptools without metadata. \"\n \"Run `bootstrap.py`.\"\n )\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n tmpl = \"easy_install-{shortver} = setuptools.command.easy_install:main\"\n yield tmpl.format(shortver='{}.{}'.format(*sys.version_info))\n\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nclass install_with_pth(install):\n \"\"\"\n Custom install command to install a .pth file for distutils patching.\n\n This hack is necessary because there's no standard way to install behavior\n on startup (and it's debatable if there should be one). 
This hack (ab)uses\n the `extra_path` behavior in Setuptools to install a `.pth` file with\n implicit behavior on startup to give higher precedence to the local version\n of `distutils` over the version from the standard library.\n\n Please do not replicate this behavior.\n \"\"\"\n\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n\n def initialize_options(self):\n install.initialize_options(self)\n self.extra_path = self._pth_name, self._pth_contents\n\n def finalize_options(self):\n install.finalize_options(self)\n self._restore_install_lib()\n\n def _restore_install_lib(self):\n \"\"\"\n Undo secondary effect of `extra_path` adding to `install_lib`\n \"\"\"\n suffix = os.path.relpath(self.install_lib, self.install_libbase)\n\n if suffix.strip() == self._pth_contents.strip():\n self.install_lib = self.install_libbase\n\n\nsetup_params = dict(\n src_root=None,\n cmdclass={'install': install_with_pth},\n package_data=package_data,\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in read_commands()\n ],\n \"setuptools.finalize_distribution_options\": [\n \"parent_finalize = setuptools.dist:_Distribution.finalize_options\",\n \"keywords = setuptools.dist:Distribution._finalize_setup_keywords\",\n \"2to3_doctests = \"\n \"setuptools.dist:Distribution._finalize_2to3_doctests\",\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n (\n \"namespace_packages.txt = \"\n \"setuptools.command.egg_info:overwrite_arg\"\n ),\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n \"setuptools.installation\":\n ['eggsecutable = 
setuptools.command.easy_install:bootstrap'],\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',\n ),\n pypi_link(\n 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n setup_requires=[\n ] + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}]}
| 3,487 | 515 |
gh_patches_debug_16526
|
rasdani/github-patches
|
git_diff
|
encode__httpx-685
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DigestAuth should raise a clear error if the request cannot replay.
Our DigestAuth implementation cannot work with non-replayable requests.
We ought to raise a nice clear error if `request.stream.is_replayable` is not True.
--- END ISSUE ---
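A minimal standalone sketch of the guard being asked for — the `can_replay()` check and the exception name follow the patch shown later in this record, but the surrounding flow is illustrative and not httpx source:

```python
class RequestBodyUnavailable(Exception):
    """Raised when an auth flow would need to re-send a one-shot request body."""

def digest_auth_flow(request):
    # Refuse to start a challenge/response exchange that cannot be replayed.
    if not request.stream.can_replay():
        raise RequestBodyUnavailable("Request body is no longer available.")
    response = yield request
    # ...on a 401 challenge, attach the Authorization header and yield again.
```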
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/auth.py`
Content:
```
1 import hashlib
2 import os
3 import re
4 import time
5 import typing
6 from base64 import b64encode
7 from urllib.request import parse_http_list
8
9 from .exceptions import ProtocolError
10 from .models import Request, Response
11 from .utils import to_bytes, to_str, unquote
12
13 AuthFlow = typing.Generator[Request, Response, None]
14
15 AuthTypes = typing.Union[
16 typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],
17 typing.Callable[["Request"], "Request"],
18 "Auth",
19 ]
20
21
22 class Auth:
23 """
24 Base class for all authentication schemes.
25 """
26
27 def __call__(self, request: Request) -> AuthFlow:
28 """
29 Execute the authentication flow.
30
31 To dispatch a request, `yield` it:
32
33 ```
34 yield request
35 ```
36
37 The client will `.send()` the response back into the flow generator. You can
38 access it like so:
39
40 ```
41 response = yield request
42 ```
43
44 A `return` (or reaching the end of the generator) will result in the
45 client returning the last response obtained from the server.
46
47 You can dispatch as many requests as is necessary.
48 """
49 yield request
50
51
52 class FunctionAuth(Auth):
53 """
54 Allows the 'auth' argument to be passed as a simple callable function,
55 that takes the request, and returns a new, modified request.
56 """
57
58 def __init__(self, func: typing.Callable[[Request], Request]) -> None:
59 self.func = func
60
61 def __call__(self, request: Request) -> AuthFlow:
62 yield self.func(request)
63
64
65 class BasicAuth(Auth):
66 """
67 Allows the 'auth' argument to be passed as a (username, password) pair,
68 and uses HTTP Basic authentication.
69 """
70
71 def __init__(
72 self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
73 ):
74 self.auth_header = self.build_auth_header(username, password)
75
76 def __call__(self, request: Request) -> AuthFlow:
77 request.headers["Authorization"] = self.auth_header
78 yield request
79
80 def build_auth_header(
81 self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
82 ) -> str:
83 userpass = b":".join((to_bytes(username), to_bytes(password)))
84 token = b64encode(userpass).decode().strip()
85 return f"Basic {token}"
86
87
88 class DigestAuth(Auth):
89 ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable] = {
90 "MD5": hashlib.md5,
91 "MD5-SESS": hashlib.md5,
92 "SHA": hashlib.sha1,
93 "SHA-SESS": hashlib.sha1,
94 "SHA-256": hashlib.sha256,
95 "SHA-256-SESS": hashlib.sha256,
96 "SHA-512": hashlib.sha512,
97 "SHA-512-SESS": hashlib.sha512,
98 }
99
100 def __init__(
101 self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
102 ) -> None:
103 self.username = to_bytes(username)
104 self.password = to_bytes(password)
105
106 def __call__(self, request: Request) -> AuthFlow:
107 response = yield request
108
109 if response.status_code != 401 or "www-authenticate" not in response.headers:
110 # If the response is not a 401 WWW-Authenticate, then we don't
111 # need to build an authenticated request.
112 return
113
114 header = response.headers["www-authenticate"]
115 try:
116 challenge = DigestAuthChallenge.from_header(header)
117 except ValueError:
118 raise ProtocolError("Malformed Digest authentication header")
119
120 request.headers["Authorization"] = self._build_auth_header(request, challenge)
121 yield request
122
123 def _build_auth_header(
124 self, request: Request, challenge: "DigestAuthChallenge"
125 ) -> str:
126 hash_func = self.ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm]
127
128 def digest(data: bytes) -> bytes:
129 return hash_func(data).hexdigest().encode()
130
131 A1 = b":".join((self.username, challenge.realm, self.password))
132
133 path = request.url.full_path.encode("utf-8")
134 A2 = b":".join((request.method.encode(), path))
135 # TODO: implement auth-int
136 HA2 = digest(A2)
137
138 nonce_count = 1 # TODO: implement nonce counting
139 nc_value = b"%08x" % nonce_count
140 cnonce = self._get_client_nonce(nonce_count, challenge.nonce)
141
142 HA1 = digest(A1)
143 if challenge.algorithm.lower().endswith("-sess"):
144 HA1 = digest(b":".join((HA1, challenge.nonce, cnonce)))
145
146 qop = self._resolve_qop(challenge.qop)
147 if qop is None:
148 digest_data = [HA1, challenge.nonce, HA2]
149 else:
150 digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2]
151 key_digest = b":".join(digest_data)
152
153 format_args = {
154 "username": self.username,
155 "realm": challenge.realm,
156 "nonce": challenge.nonce,
157 "uri": path,
158 "response": digest(b":".join((HA1, key_digest))),
159 "algorithm": challenge.algorithm.encode(),
160 }
161 if challenge.opaque:
162 format_args["opaque"] = challenge.opaque
163 if qop:
164 format_args["qop"] = b"auth"
165 format_args["nc"] = nc_value
166 format_args["cnonce"] = cnonce
167
168 return "Digest " + self._get_header_value(format_args)
169
170 def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:
171 s = str(nonce_count).encode()
172 s += nonce
173 s += time.ctime().encode()
174 s += os.urandom(8)
175
176 return hashlib.sha1(s).hexdigest()[:16].encode()
177
178 def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str:
179 NON_QUOTED_FIELDS = ("algorithm", "qop", "nc")
180 QUOTED_TEMPLATE = '{}="{}"'
181 NON_QUOTED_TEMPLATE = "{}={}"
182
183 header_value = ""
184 for i, (field, value) in enumerate(header_fields.items()):
185 if i > 0:
186 header_value += ", "
187 template = (
188 QUOTED_TEMPLATE
189 if field not in NON_QUOTED_FIELDS
190 else NON_QUOTED_TEMPLATE
191 )
192 header_value += template.format(field, to_str(value))
193
194 return header_value
195
196 def _resolve_qop(self, qop: typing.Optional[bytes]) -> typing.Optional[bytes]:
197 if qop is None:
198 return None
199 qops = re.split(b", ?", qop)
200 if b"auth" in qops:
201 return b"auth"
202
203 if qops == [b"auth-int"]:
204 raise NotImplementedError("Digest auth-int support is not yet implemented")
205
206 raise ProtocolError(f'Unexpected qop value "{qop!r}" in digest auth')
207
208
209 class DigestAuthChallenge:
210 def __init__(
211 self,
212 realm: bytes,
213 nonce: bytes,
214 algorithm: str = None,
215 opaque: typing.Optional[bytes] = None,
216 qop: typing.Optional[bytes] = None,
217 ) -> None:
218 self.realm = realm
219 self.nonce = nonce
220 self.algorithm = algorithm or "MD5"
221 self.opaque = opaque
222 self.qop = qop
223
224 @classmethod
225 def from_header(cls, header: str) -> "DigestAuthChallenge":
226 """Returns a challenge from a Digest WWW-Authenticate header.
227 These take the form of:
228 `Digest realm="[email protected]",qop="auth,auth-int",nonce="abc",opaque="xyz"`
229 """
230 scheme, _, fields = header.partition(" ")
231 if scheme.lower() != "digest":
232 raise ValueError("Header does not start with 'Digest'")
233
234 header_dict: typing.Dict[str, str] = {}
235 for field in parse_http_list(fields):
236 key, value = field.strip().split("=", 1)
237 header_dict[key] = unquote(value)
238
239 try:
240 return cls.from_header_dict(header_dict)
241 except KeyError as exc:
242 raise ValueError("Malformed Digest WWW-Authenticate header") from exc
243
244 @classmethod
245 def from_header_dict(cls, header_dict: dict) -> "DigestAuthChallenge":
246 realm = header_dict["realm"].encode()
247 nonce = header_dict["nonce"].encode()
248 qop = header_dict["qop"].encode() if "qop" in header_dict else None
249 opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
250 algorithm = header_dict.get("algorithm")
251 return cls(
252 realm=realm, nonce=nonce, qop=qop, opaque=opaque, algorithm=algorithm
253 )
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/auth.py b/httpx/auth.py
--- a/httpx/auth.py
+++ b/httpx/auth.py
@@ -6,7 +6,7 @@
from base64 import b64encode
from urllib.request import parse_http_list
-from .exceptions import ProtocolError
+from .exceptions import ProtocolError, RequestBodyUnavailable
from .models import Request, Response
from .utils import to_bytes, to_str, unquote
@@ -104,6 +104,8 @@
self.password = to_bytes(password)
def __call__(self, request: Request) -> AuthFlow:
+ if not request.stream.can_replay():
+ raise RequestBodyUnavailable("Request body is no longer available.")
response = yield request
if response.status_code != 401 or "www-authenticate" not in response.headers:
|
{"golden_diff": "diff --git a/httpx/auth.py b/httpx/auth.py\n--- a/httpx/auth.py\n+++ b/httpx/auth.py\n@@ -6,7 +6,7 @@\n from base64 import b64encode\n from urllib.request import parse_http_list\n \n-from .exceptions import ProtocolError\n+from .exceptions import ProtocolError, RequestBodyUnavailable\n from .models import Request, Response\n from .utils import to_bytes, to_str, unquote\n \n@@ -104,6 +104,8 @@\n self.password = to_bytes(password)\n \n def __call__(self, request: Request) -> AuthFlow:\n+ if not request.stream.can_replay():\n+ raise RequestBodyUnavailable(\"Request body is no longer available.\")\n response = yield request\n \n if response.status_code != 401 or \"www-authenticate\" not in response.headers:\n", "issue": "DigestAuth should raise a clear error if the request cannot replay.\nOur DigestAuth implementation cannot work with non-replayable requests.\r\nWe ought to raise a nice clear error if `request.stream.is_replayable` is not True.\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport time\nimport typing\nfrom base64 import b64encode\nfrom urllib.request import parse_http_list\n\nfrom .exceptions import ProtocolError\nfrom .models import Request, Response\nfrom .utils import to_bytes, to_str, unquote\n\nAuthFlow = typing.Generator[Request, Response, None]\n\nAuthTypes = typing.Union[\n typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],\n typing.Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n]\n\n\nclass Auth:\n \"\"\"\n Base class for all authentication schemes.\n \"\"\"\n\n def __call__(self, request: Request) -> AuthFlow:\n \"\"\"\n Execute the authentication flow.\n\n To dispatch a request, `yield` it:\n\n ```\n yield request\n ```\n\n The client will `.send()` the response back into the flow generator. 
You can\n access it like so:\n\n ```\n response = yield request\n ```\n\n A `return` (or reaching the end of the generator) will result in the\n client returning the last response obtained from the server.\n\n You can dispatch as many requests as is necessary.\n \"\"\"\n yield request\n\n\nclass FunctionAuth(Auth):\n \"\"\"\n Allows the 'auth' argument to be passed as a simple callable function,\n that takes the request, and returns a new, modified request.\n \"\"\"\n\n def __init__(self, func: typing.Callable[[Request], Request]) -> None:\n self.func = func\n\n def __call__(self, request: Request) -> AuthFlow:\n yield self.func(request)\n\n\nclass BasicAuth(Auth):\n \"\"\"\n Allows the 'auth' argument to be passed as a (username, password) pair,\n and uses HTTP Basic authentication.\n \"\"\"\n\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ):\n self.auth_header = self.build_auth_header(username, password)\n\n def __call__(self, request: Request) -> AuthFlow:\n request.headers[\"Authorization\"] = self.auth_header\n yield request\n\n def build_auth_header(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ) -> str:\n userpass = b\":\".join((to_bytes(username), to_bytes(password)))\n token = b64encode(userpass).decode().strip()\n return f\"Basic {token}\"\n\n\nclass DigestAuth(Auth):\n ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable] = {\n \"MD5\": hashlib.md5,\n \"MD5-SESS\": hashlib.md5,\n \"SHA\": hashlib.sha1,\n \"SHA-SESS\": hashlib.sha1,\n \"SHA-256\": hashlib.sha256,\n \"SHA-256-SESS\": hashlib.sha256,\n \"SHA-512\": hashlib.sha512,\n \"SHA-512-SESS\": hashlib.sha512,\n }\n\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ) -> None:\n self.username = to_bytes(username)\n self.password = to_bytes(password)\n\n def __call__(self, request: Request) -> AuthFlow:\n response = yield request\n\n if response.status_code != 401 or \"www-authenticate\" not in response.headers:\n # If the response is not a 401 WWW-Authenticate, then we don't\n # need to build an authenticated request.\n return\n\n header = response.headers[\"www-authenticate\"]\n try:\n challenge = DigestAuthChallenge.from_header(header)\n except ValueError:\n raise ProtocolError(\"Malformed Digest authentication header\")\n\n request.headers[\"Authorization\"] = self._build_auth_header(request, challenge)\n yield request\n\n def _build_auth_header(\n self, request: Request, challenge: \"DigestAuthChallenge\"\n ) -> str:\n hash_func = self.ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm]\n\n def digest(data: bytes) -> bytes:\n return hash_func(data).hexdigest().encode()\n\n A1 = b\":\".join((self.username, challenge.realm, self.password))\n\n path = request.url.full_path.encode(\"utf-8\")\n A2 = b\":\".join((request.method.encode(), path))\n # TODO: implement auth-int\n HA2 = digest(A2)\n\n nonce_count = 1 # TODO: implement nonce counting\n nc_value = b\"%08x\" % nonce_count\n cnonce = self._get_client_nonce(nonce_count, challenge.nonce)\n\n HA1 = digest(A1)\n if challenge.algorithm.lower().endswith(\"-sess\"):\n HA1 = digest(b\":\".join((HA1, challenge.nonce, cnonce)))\n\n qop = self._resolve_qop(challenge.qop)\n if qop is None:\n digest_data = [HA1, challenge.nonce, HA2]\n else:\n digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2]\n key_digest = b\":\".join(digest_data)\n\n format_args = {\n \"username\": self.username,\n \"realm\": challenge.realm,\n \"nonce\": 
challenge.nonce,\n \"uri\": path,\n \"response\": digest(b\":\".join((HA1, key_digest))),\n \"algorithm\": challenge.algorithm.encode(),\n }\n if challenge.opaque:\n format_args[\"opaque\"] = challenge.opaque\n if qop:\n format_args[\"qop\"] = b\"auth\"\n format_args[\"nc\"] = nc_value\n format_args[\"cnonce\"] = cnonce\n\n return \"Digest \" + self._get_header_value(format_args)\n\n def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:\n s = str(nonce_count).encode()\n s += nonce\n s += time.ctime().encode()\n s += os.urandom(8)\n\n return hashlib.sha1(s).hexdigest()[:16].encode()\n\n def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str:\n NON_QUOTED_FIELDS = (\"algorithm\", \"qop\", \"nc\")\n QUOTED_TEMPLATE = '{}=\"{}\"'\n NON_QUOTED_TEMPLATE = \"{}={}\"\n\n header_value = \"\"\n for i, (field, value) in enumerate(header_fields.items()):\n if i > 0:\n header_value += \", \"\n template = (\n QUOTED_TEMPLATE\n if field not in NON_QUOTED_FIELDS\n else NON_QUOTED_TEMPLATE\n )\n header_value += template.format(field, to_str(value))\n\n return header_value\n\n def _resolve_qop(self, qop: typing.Optional[bytes]) -> typing.Optional[bytes]:\n if qop is None:\n return None\n qops = re.split(b\", ?\", qop)\n if b\"auth\" in qops:\n return b\"auth\"\n\n if qops == [b\"auth-int\"]:\n raise NotImplementedError(\"Digest auth-int support is not yet implemented\")\n\n raise ProtocolError(f'Unexpected qop value \"{qop!r}\" in digest auth')\n\n\nclass DigestAuthChallenge:\n def __init__(\n self,\n realm: bytes,\n nonce: bytes,\n algorithm: str = None,\n opaque: typing.Optional[bytes] = None,\n qop: typing.Optional[bytes] = None,\n ) -> None:\n self.realm = realm\n self.nonce = nonce\n self.algorithm = algorithm or \"MD5\"\n self.opaque = opaque\n self.qop = qop\n\n @classmethod\n def from_header(cls, header: str) -> \"DigestAuthChallenge\":\n \"\"\"Returns a challenge from a Digest WWW-Authenticate header.\n These take the form of:\n `Digest realm=\"[email protected]\",qop=\"auth,auth-int\",nonce=\"abc\",opaque=\"xyz\"`\n \"\"\"\n scheme, _, fields = header.partition(\" \")\n if scheme.lower() != \"digest\":\n raise ValueError(\"Header does not start with 'Digest'\")\n\n header_dict: typing.Dict[str, str] = {}\n for field in parse_http_list(fields):\n key, value = field.strip().split(\"=\", 1)\n header_dict[key] = unquote(value)\n\n try:\n return cls.from_header_dict(header_dict)\n except KeyError as exc:\n raise ValueError(\"Malformed Digest WWW-Authenticate header\") from exc\n\n @classmethod\n def from_header_dict(cls, header_dict: dict) -> \"DigestAuthChallenge\":\n realm = header_dict[\"realm\"].encode()\n nonce = header_dict[\"nonce\"].encode()\n qop = header_dict[\"qop\"].encode() if \"qop\" in header_dict else None\n opaque = header_dict[\"opaque\"].encode() if \"opaque\" in header_dict else None\n algorithm = header_dict.get(\"algorithm\")\n return cls(\n realm=realm, nonce=nonce, qop=qop, opaque=opaque, algorithm=algorithm\n )\n", "path": "httpx/auth.py"}], "after_files": [{"content": "import hashlib\nimport os\nimport re\nimport time\nimport typing\nfrom base64 import b64encode\nfrom urllib.request import parse_http_list\n\nfrom .exceptions import ProtocolError, RequestBodyUnavailable\nfrom .models import Request, Response\nfrom .utils import to_bytes, to_str, unquote\n\nAuthFlow = typing.Generator[Request, Response, None]\n\nAuthTypes = typing.Union[\n typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],\n typing.Callable[[\"Request\"], 
\"Request\"],\n \"Auth\",\n]\n\n\nclass Auth:\n \"\"\"\n Base class for all authentication schemes.\n \"\"\"\n\n def __call__(self, request: Request) -> AuthFlow:\n \"\"\"\n Execute the authentication flow.\n\n To dispatch a request, `yield` it:\n\n ```\n yield request\n ```\n\n The client will `.send()` the response back into the flow generator. You can\n access it like so:\n\n ```\n response = yield request\n ```\n\n A `return` (or reaching the end of the generator) will result in the\n client returning the last response obtained from the server.\n\n You can dispatch as many requests as is necessary.\n \"\"\"\n yield request\n\n\nclass FunctionAuth(Auth):\n \"\"\"\n Allows the 'auth' argument to be passed as a simple callable function,\n that takes the request, and returns a new, modified request.\n \"\"\"\n\n def __init__(self, func: typing.Callable[[Request], Request]) -> None:\n self.func = func\n\n def __call__(self, request: Request) -> AuthFlow:\n yield self.func(request)\n\n\nclass BasicAuth(Auth):\n \"\"\"\n Allows the 'auth' argument to be passed as a (username, password) pair,\n and uses HTTP Basic authentication.\n \"\"\"\n\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ):\n self.auth_header = self.build_auth_header(username, password)\n\n def __call__(self, request: Request) -> AuthFlow:\n request.headers[\"Authorization\"] = self.auth_header\n yield request\n\n def build_auth_header(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ) -> str:\n userpass = b\":\".join((to_bytes(username), to_bytes(password)))\n token = b64encode(userpass).decode().strip()\n return f\"Basic {token}\"\n\n\nclass DigestAuth(Auth):\n ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable] = {\n \"MD5\": hashlib.md5,\n \"MD5-SESS\": hashlib.md5,\n \"SHA\": hashlib.sha1,\n \"SHA-SESS\": hashlib.sha1,\n \"SHA-256\": hashlib.sha256,\n \"SHA-256-SESS\": hashlib.sha256,\n \"SHA-512\": hashlib.sha512,\n \"SHA-512-SESS\": hashlib.sha512,\n }\n\n def __init__(\n self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]\n ) -> None:\n self.username = to_bytes(username)\n self.password = to_bytes(password)\n\n def __call__(self, request: Request) -> AuthFlow:\n if not request.stream.can_replay():\n raise RequestBodyUnavailable(\"Request body is no longer available.\")\n response = yield request\n\n if response.status_code != 401 or \"www-authenticate\" not in response.headers:\n # If the response is not a 401 WWW-Authenticate, then we don't\n # need to build an authenticated request.\n return\n\n header = response.headers[\"www-authenticate\"]\n try:\n challenge = DigestAuthChallenge.from_header(header)\n except ValueError:\n raise ProtocolError(\"Malformed Digest authentication header\")\n\n request.headers[\"Authorization\"] = self._build_auth_header(request, challenge)\n yield request\n\n def _build_auth_header(\n self, request: Request, challenge: \"DigestAuthChallenge\"\n ) -> str:\n hash_func = self.ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm]\n\n def digest(data: bytes) -> bytes:\n return hash_func(data).hexdigest().encode()\n\n A1 = b\":\".join((self.username, challenge.realm, self.password))\n\n path = request.url.full_path.encode(\"utf-8\")\n A2 = b\":\".join((request.method.encode(), path))\n # TODO: implement auth-int\n HA2 = digest(A2)\n\n nonce_count = 1 # TODO: implement nonce counting\n nc_value = b\"%08x\" % nonce_count\n cnonce = self._get_client_nonce(nonce_count, 
challenge.nonce)\n\n HA1 = digest(A1)\n if challenge.algorithm.lower().endswith(\"-sess\"):\n HA1 = digest(b\":\".join((HA1, challenge.nonce, cnonce)))\n\n qop = self._resolve_qop(challenge.qop)\n if qop is None:\n digest_data = [HA1, challenge.nonce, HA2]\n else:\n digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2]\n key_digest = b\":\".join(digest_data)\n\n format_args = {\n \"username\": self.username,\n \"realm\": challenge.realm,\n \"nonce\": challenge.nonce,\n \"uri\": path,\n \"response\": digest(b\":\".join((HA1, key_digest))),\n \"algorithm\": challenge.algorithm.encode(),\n }\n if challenge.opaque:\n format_args[\"opaque\"] = challenge.opaque\n if qop:\n format_args[\"qop\"] = b\"auth\"\n format_args[\"nc\"] = nc_value\n format_args[\"cnonce\"] = cnonce\n\n return \"Digest \" + self._get_header_value(format_args)\n\n def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:\n s = str(nonce_count).encode()\n s += nonce\n s += time.ctime().encode()\n s += os.urandom(8)\n\n return hashlib.sha1(s).hexdigest()[:16].encode()\n\n def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str:\n NON_QUOTED_FIELDS = (\"algorithm\", \"qop\", \"nc\")\n QUOTED_TEMPLATE = '{}=\"{}\"'\n NON_QUOTED_TEMPLATE = \"{}={}\"\n\n header_value = \"\"\n for i, (field, value) in enumerate(header_fields.items()):\n if i > 0:\n header_value += \", \"\n template = (\n QUOTED_TEMPLATE\n if field not in NON_QUOTED_FIELDS\n else NON_QUOTED_TEMPLATE\n )\n header_value += template.format(field, to_str(value))\n\n return header_value\n\n def _resolve_qop(self, qop: typing.Optional[bytes]) -> typing.Optional[bytes]:\n if qop is None:\n return None\n qops = re.split(b\", ?\", qop)\n if b\"auth\" in qops:\n return b\"auth\"\n\n if qops == [b\"auth-int\"]:\n raise NotImplementedError(\"Digest auth-int support is not yet implemented\")\n\n raise ProtocolError(f'Unexpected qop value \"{qop!r}\" in digest auth')\n\n\nclass DigestAuthChallenge:\n def __init__(\n self,\n realm: bytes,\n nonce: bytes,\n algorithm: str = None,\n opaque: typing.Optional[bytes] = None,\n qop: typing.Optional[bytes] = None,\n ) -> None:\n self.realm = realm\n self.nonce = nonce\n self.algorithm = algorithm or \"MD5\"\n self.opaque = opaque\n self.qop = qop\n\n @classmethod\n def from_header(cls, header: str) -> \"DigestAuthChallenge\":\n \"\"\"Returns a challenge from a Digest WWW-Authenticate header.\n These take the form of:\n `Digest realm=\"[email protected]\",qop=\"auth,auth-int\",nonce=\"abc\",opaque=\"xyz\"`\n \"\"\"\n scheme, _, fields = header.partition(\" \")\n if scheme.lower() != \"digest\":\n raise ValueError(\"Header does not start with 'Digest'\")\n\n header_dict: typing.Dict[str, str] = {}\n for field in parse_http_list(fields):\n key, value = field.strip().split(\"=\", 1)\n header_dict[key] = unquote(value)\n\n try:\n return cls.from_header_dict(header_dict)\n except KeyError as exc:\n raise ValueError(\"Malformed Digest WWW-Authenticate header\") from exc\n\n @classmethod\n def from_header_dict(cls, header_dict: dict) -> \"DigestAuthChallenge\":\n realm = header_dict[\"realm\"].encode()\n nonce = header_dict[\"nonce\"].encode()\n qop = header_dict[\"qop\"].encode() if \"qop\" in header_dict else None\n opaque = header_dict[\"opaque\"].encode() if \"opaque\" in header_dict else None\n algorithm = header_dict.get(\"algorithm\")\n return cls(\n realm=realm, nonce=nonce, qop=qop, opaque=opaque, algorithm=algorithm\n )\n", "path": "httpx/auth.py"}]}
| 2,945 | 185 |
gh_patches_debug_14449
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-602
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow sending HTML emails with scrapy.mail.MailSender
I've patched this locally by changing the `send` method:
```
def send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None):
if attachs:
msg = MIMEMultipart()
else:
msg = MIMENonMultipart(*mime.split('/'))
```
But it seems fragile. Any thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/mail.py`
Content:
```
1 """
2 Mail sending helpers
3
4 See documentation in docs/topics/email.rst
5 """
6 from cStringIO import StringIO
7 from email.MIMEMultipart import MIMEMultipart
8 from email.MIMENonMultipart import MIMENonMultipart
9 from email.MIMEBase import MIMEBase
10 from email.MIMEText import MIMEText
11 from email.Utils import COMMASPACE, formatdate
12 from email import Encoders
13
14 from twisted.internet import defer, reactor, ssl
15 from twisted.mail.smtp import ESMTPSenderFactory
16
17 from scrapy import log
18
19 class MailSender(object):
20
21 def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
22 smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
23 self.smtphost = smtphost
24 self.smtpport = smtpport
25 self.smtpuser = smtpuser
26 self.smtppass = smtppass
27 self.smtptls = smtptls
28 self.smtpssl = smtpssl
29 self.mailfrom = mailfrom
30 self.debug = debug
31
32 @classmethod
33 def from_settings(cls, settings):
34 return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
35 settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
36 settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
37
38 def send(self, to, subject, body, cc=None, attachs=(), _callback=None):
39 if attachs:
40 msg = MIMEMultipart()
41 else:
42 msg = MIMENonMultipart('text', 'plain')
43 msg['From'] = self.mailfrom
44 msg['To'] = COMMASPACE.join(to)
45 msg['Date'] = formatdate(localtime=True)
46 msg['Subject'] = subject
47 rcpts = to[:]
48 if cc:
49 rcpts.extend(cc)
50 msg['Cc'] = COMMASPACE.join(cc)
51
52 if attachs:
53 msg.attach(MIMEText(body))
54 for attach_name, mimetype, f in attachs:
55 part = MIMEBase(*mimetype.split('/'))
56 part.set_payload(f.read())
57 Encoders.encode_base64(part)
58 part.add_header('Content-Disposition', 'attachment; filename="%s"' \
59 % attach_name)
60 msg.attach(part)
61 else:
62 msg.set_payload(body)
63
64 if _callback:
65 _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
66
67 if self.debug:
68 log.msg(format='Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
69 level=log.DEBUG, mailto=to, mailcc=cc, mailsubject=subject, mailattachs=len(attachs))
70 return
71
72 dfd = self._sendmail(rcpts, msg.as_string())
73 dfd.addCallbacks(self._sent_ok, self._sent_failed,
74 callbackArgs=[to, cc, subject, len(attachs)],
75 errbackArgs=[to, cc, subject, len(attachs)])
76 reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
77 return dfd
78
79 def _sent_ok(self, result, to, cc, subject, nattachs):
80 log.msg(format='Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
81 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
82 mailto=to, mailcc=cc, mailsubject=subject, mailattachs=nattachs)
83
84 def _sent_failed(self, failure, to, cc, subject, nattachs):
85 errstr = str(failure.value)
86 log.msg(format='Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
87 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
88 '- %(mailerr)s',
89 level=log.ERROR, mailto=to, mailcc=cc, mailsubject=subject,
90 mailattachs=nattachs, mailerr=errstr)
91
92 def _sendmail(self, to_addrs, msg):
93 msg = StringIO(msg)
94 d = defer.Deferred()
95 factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
96 to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
97 requireTransportSecurity=self.smtptls)
98 factory.noisy = False
99
100 if self.smtpssl:
101 reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
102 else:
103 reactor.connectTCP(self.smtphost, self.smtpport, factory)
104
105 return d
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/mail.py b/scrapy/mail.py
--- a/scrapy/mail.py
+++ b/scrapy/mail.py
@@ -35,11 +35,11 @@
settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
- def send(self, to, subject, body, cc=None, attachs=(), _callback=None):
+ def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', _callback=None):
if attachs:
msg = MIMEMultipart()
else:
- msg = MIMENonMultipart('text', 'plain')
+ msg = MIMENonMultipart(*mimetype.split('/', 1))
msg['From'] = self.mailfrom
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
|
{"golden_diff": "diff --git a/scrapy/mail.py b/scrapy/mail.py\n--- a/scrapy/mail.py\n+++ b/scrapy/mail.py\n@@ -35,11 +35,11 @@\n settings['MAIL_PASS'], settings.getint('MAIL_PORT'),\n settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))\n \n- def send(self, to, subject, body, cc=None, attachs=(), _callback=None):\n+ def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n- msg = MIMENonMultipart('text', 'plain')\n+ msg = MIMENonMultipart(*mimetype.split('/', 1))\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n", "issue": "Allow sending HTML emails with scrapy.mail.MailSender\nI've patched this locally by changing the `send` method:\n\n```\ndef send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mime.split('/'))\n```\n\nBut it seems fragile. Any thoughts?\n\nAllow sending HTML emails with scrapy.mail.MailSender\nI've patched this locally by changing the `send` method:\n\n```\ndef send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mime.split('/'))\n```\n\nBut it seems fragile. Any thoughts?\n\n", "before_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nfrom cStringIO import StringIO\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMENonMultipart import MIMENonMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email.MIMEText import MIMEText\nfrom email.Utils import COMMASPACE, formatdate\nfrom email import Encoders\n\nfrom twisted.internet import defer, reactor, ssl\nfrom twisted.mail.smtp import ESMTPSenderFactory\n\nfrom scrapy import log\n\nclass MailSender(object):\n\n def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',\n smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = smtpuser\n self.smtppass = smtppass\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],\n settings['MAIL_PASS'], settings.getint('MAIL_PORT'),\n settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))\n\n def send(self, to, subject, body, cc=None, attachs=(), _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart('text', 'plain')\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg['Cc'] = COMMASPACE.join(cc)\n\n if attachs:\n msg.attach(MIMEText(body))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split('/'))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' \\\n % attach_name)\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n log.msg(format='Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n level=log.DEBUG, mailto=to, mailcc=cc, mailsubject=subject, 
mailattachs=len(attachs))\n return\n\n dfd = self._sendmail(rcpts, msg.as_string())\n dfd.addCallbacks(self._sent_ok, self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)])\n reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n log.msg(format='Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n mailto=to, mailcc=cc, mailsubject=subject, mailattachs=nattachs)\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n log.msg(format='Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n '- %(mailerr)s',\n level=log.ERROR, mailto=to, mailcc=cc, mailsubject=subject,\n mailattachs=nattachs, mailerr=errstr)\n\n def _sendmail(self, to_addrs, msg):\n msg = StringIO(msg)\n d = defer.Deferred()\n factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \\\n to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \\\n requireTransportSecurity=self.smtptls)\n factory.noisy = False\n\n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n", "path": "scrapy/mail.py"}], "after_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nfrom cStringIO import StringIO\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMENonMultipart import MIMENonMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email.MIMEText import MIMEText\nfrom email.Utils import COMMASPACE, formatdate\nfrom email import Encoders\n\nfrom twisted.internet import defer, reactor, ssl\nfrom twisted.mail.smtp import ESMTPSenderFactory\n\nfrom scrapy import log\n\nclass MailSender(object):\n\n def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',\n smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = smtpuser\n self.smtppass = smtppass\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],\n settings['MAIL_PASS'], settings.getint('MAIL_PORT'),\n settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))\n\n def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mimetype.split('/', 1))\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg['Cc'] = COMMASPACE.join(cc)\n\n if attachs:\n msg.attach(MIMEText(body))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split('/'))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' \\\n % attach_name)\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n log.msg(format='Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s Subject=\"%(mailsubject)s\" 
Attachs=%(mailattachs)d',\n level=log.DEBUG, mailto=to, mailcc=cc, mailsubject=subject, mailattachs=len(attachs))\n return\n\n dfd = self._sendmail(rcpts, msg.as_string())\n dfd.addCallbacks(self._sent_ok, self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)])\n reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n log.msg(format='Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n mailto=to, mailcc=cc, mailsubject=subject, mailattachs=nattachs)\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n log.msg(format='Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n '- %(mailerr)s',\n level=log.ERROR, mailto=to, mailcc=cc, mailsubject=subject,\n mailattachs=nattachs, mailerr=errstr)\n\n def _sendmail(self, to_addrs, msg):\n msg = StringIO(msg)\n d = defer.Deferred()\n factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \\\n to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \\\n requireTransportSecurity=self.smtptls)\n factory.noisy = False\n\n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n", "path": "scrapy/mail.py"}]}
| 1,690 | 206 |
gh_patches_debug_9005
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-5129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mismatching path in Windows when loading UCF101
### 🐛 Describe the bug
I tried to load UCF-101 by `torchvision.datasets.UCF101` on my Windows laptop. However, I cannot get correct indices by [`_select_fold`](https://github.com/pytorch/vision/blob/main/torchvision/datasets/ucf101.py#L113). Line 113 may go wrong in Windows.
Variable i is 'path_to_video/ApplyEyeMakeup\\\v_ApplyEyeMakeup_g01_c01.avi', while
selected_files is 'path_to_video/ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01.avi'
The reason is that i is created by `os.path.joint()` using the double right slash. But selected_files is extracted from annotation files. Paths in these files are using the left slash, e.g. 'ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01.avi'.
May I know how to fix this, please? Many thanks!
### Versions
Collecting environment information...
PyTorch version: 1.10.0+cu102
Is debug build: False
CUDA used to build PyTorch: 10.2
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 10
GCC version: Could not collect
Clang version: Could not collect
CMake version: Could not collect
Libc version: N/A
Python version: 3.8.12 (default, Oct 12 2021, 03:01:40) [MSC v.1916 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.18363-SP0
Is CUDA available: True
CUDA runtime version: 10.2.89
GPU models and configuration: Could not collect
Nvidia driver version: Could not collect
cuDNN version: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\bin\cudnn64_7.dll
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] mypy==0.910
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.21.4
[pip3] pytorch-lightning==1.5.5
[pip3] pytorch-memlab==0.2.4
[pip3] torch==1.10.0+cu102
[pip3] torch-geometric==2.0.2
[pip3] torch-scatter==2.0.9
[pip3] torch-sparse==0.6.12
[pip3] torchaudio==0.10.0+cu102
[pip3] torchmetrics==0.6.1
[pip3] torchsummary==1.5.1
[pip3] torchvision==0.11.1+cu102
[conda] libblas 3.9.0 12_win64_mkl conda-forge
[conda] libcblas 3.9.0 12_win64_mkl conda-forge
[conda] liblapack 3.9.0 12_win64_mkl conda-forge
[conda] mkl 2021.4.0 h0e2418a_729 conda-forge
[conda] mypy 0.910 pypi_0 pypi
[conda] mypy-extensions 0.4.3 pypi_0 pypi
[conda] numpy 1.21.4 py38h089cfbf_0 conda-forge
[conda] pytorch-lightning 1.5.5 pypi_0 pypi
[conda] pytorch-memlab 0.2.4 pypi_0 pypi
[conda] torch 1.10.0+cu102 pypi_0 pypi
[conda] torch-geometric 2.0.2 pypi_0 pypi
[conda] torch-scatter 2.0.9 pypi_0 pypi
[conda] torch-sparse 0.6.12 pypi_0 pypi
[conda] torchaudio 0.10.0+cu102 pypi_0 pypi
[conda] torchmetrics 0.6.1 pypi_0 pypi
[conda] torchsummary 1.5.1 pypi_0 pypi
[conda] torchvision 0.11.1+cu102 pypi_0 pypi
cc @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/ucf101.py`
Content:
```
1 import os
2 from typing import Any, Dict, List, Tuple, Optional, Callable
3
4 from torch import Tensor
5
6 from .folder import find_classes, make_dataset
7 from .video_utils import VideoClips
8 from .vision import VisionDataset
9
10
11 class UCF101(VisionDataset):
12 """
13 `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
14
15 UCF101 is an action recognition video dataset.
16 This dataset consider every video as a collection of video clips of fixed size, specified
17 by ``frames_per_clip``, where the step in frames between each clip is given by
18 ``step_between_clips``. The dataset itself can be downloaded from the dataset website;
19 annotations that ``annotation_path`` should be pointing to can be downloaded from `here
20 <https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`.
21
22 To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
23 and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
24 elements will come from video 1, and the next three elements from video 2.
25 Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
26 frames in a video might be present.
27
28 Internally, it uses a VideoClips object to handle clip creation.
29
30 Args:
31 root (string): Root directory of the UCF101 Dataset.
32 annotation_path (str): path to the folder containing the split files;
33 see docstring above for download instructions of these files
34 frames_per_clip (int): number of frames in a clip.
35 step_between_clips (int, optional): number of frames between each clip.
36 fold (int, optional): which fold to use. Should be between 1 and 3.
37 train (bool, optional): if ``True``, creates a dataset from the train split,
38 otherwise from the ``test`` split.
39 transform (callable, optional): A function/transform that takes in a TxHxWxC video
40 and returns a transformed version.
41
42 Returns:
43 tuple: A 3-tuple with the following entries:
44
45 - video (Tensor[T, H, W, C]): the `T` video frames
46 - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
47 and `L` is the number of points
48 - label (int): class of the video clip
49 """
50
51 def __init__(
52 self,
53 root: str,
54 annotation_path: str,
55 frames_per_clip: int,
56 step_between_clips: int = 1,
57 frame_rate: Optional[int] = None,
58 fold: int = 1,
59 train: bool = True,
60 transform: Optional[Callable] = None,
61 _precomputed_metadata: Optional[Dict[str, Any]] = None,
62 num_workers: int = 1,
63 _video_width: int = 0,
64 _video_height: int = 0,
65 _video_min_dimension: int = 0,
66 _audio_samples: int = 0,
67 ) -> None:
68 super().__init__(root)
69 if not 1 <= fold <= 3:
70 raise ValueError(f"fold should be between 1 and 3, got {fold}")
71
72 extensions = ("avi",)
73 self.fold = fold
74 self.train = train
75
76 self.classes, class_to_idx = find_classes(self.root)
77 self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
78 video_list = [x[0] for x in self.samples]
79 video_clips = VideoClips(
80 video_list,
81 frames_per_clip,
82 step_between_clips,
83 frame_rate,
84 _precomputed_metadata,
85 num_workers=num_workers,
86 _video_width=_video_width,
87 _video_height=_video_height,
88 _video_min_dimension=_video_min_dimension,
89 _audio_samples=_audio_samples,
90 )
91 # we bookkeep the full version of video clips because we want to be able
92 # to return the meta data of full version rather than the subset version of
93 # video clips
94 self.full_video_clips = video_clips
95 self.indices = self._select_fold(video_list, annotation_path, fold, train)
96 self.video_clips = video_clips.subset(self.indices)
97 self.transform = transform
98
99 @property
100 def metadata(self) -> Dict[str, Any]:
101 return self.full_video_clips.metadata
102
103 def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:
104 name = "train" if train else "test"
105 name = f"{name}list{fold:02d}.txt"
106 f = os.path.join(annotation_path, name)
107 selected_files = set()
108 with open(f) as fid:
109 data = fid.readlines()
110 data = [x.strip().split(" ")[0] for x in data]
111 data = [os.path.join(self.root, x) for x in data]
112 selected_files.update(data)
113 indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
114 return indices
115
116 def __len__(self) -> int:
117 return self.video_clips.num_clips()
118
119 def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
120 video, audio, info, video_idx = self.video_clips.get_clip(idx)
121 label = self.samples[self.indices[video_idx]][1]
122
123 if self.transform is not None:
124 video = self.transform(video)
125
126 return video, audio, label
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/datasets/ucf101.py b/torchvision/datasets/ucf101.py
--- a/torchvision/datasets/ucf101.py
+++ b/torchvision/datasets/ucf101.py
@@ -108,7 +108,7 @@
with open(f) as fid:
data = fid.readlines()
data = [x.strip().split(" ")[0] for x in data]
- data = [os.path.join(self.root, x) for x in data]
+ data = [os.path.join(self.root, *x.split("/")) for x in data]
selected_files.update(data)
indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
return indices
|
{"golden_diff": "diff --git a/torchvision/datasets/ucf101.py b/torchvision/datasets/ucf101.py\n--- a/torchvision/datasets/ucf101.py\n+++ b/torchvision/datasets/ucf101.py\n@@ -108,7 +108,7 @@\n with open(f) as fid:\n data = fid.readlines()\n data = [x.strip().split(\" \")[0] for x in data]\n- data = [os.path.join(self.root, x) for x in data]\n+ data = [os.path.join(self.root, *x.split(\"/\")) for x in data]\n selected_files.update(data)\n indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]\n return indices\n", "issue": "Mismatching path in Windows when loading UCF101\n### \ud83d\udc1b Describe the bug\n\nI tried to load UCF-101 by `torchvision.datasets.UCF101` on my Windows laptop. However, I cannot get correct indices by [`_select_fold`](https://github.com/pytorch/vision/blob/main/torchvision/datasets/ucf101.py#L113). Line 113 may go wrong in Windows.\r\n\r\nVariable i is 'path_to_video/ApplyEyeMakeup\\\\\\v_ApplyEyeMakeup_g01_c01.avi', while \r\nselected_files is 'path_to_video/ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01.avi'\r\n\r\nThe reason is that i is created by `os.path.joint()` using the double right slash. But selected_files is extracted from annotation files. Paths in these files are using the left slash, e.g. 'ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01.avi'.\r\n\r\nMay I know how to fix this, please? Many thanks!\n\n### Versions\n\nCollecting environment information...\r\nPyTorch version: 1.10.0+cu102\r\nIs debug build: False\r\nCUDA used to build PyTorch: 10.2\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Microsoft Windows 10\r\nGCC version: Could not collect\r\nClang version: Could not collect\r\nCMake version: Could not collect\r\nLibc version: N/A\r\n\r\nPython version: 3.8.12 (default, Oct 12 2021, 03:01:40) [MSC v.1916 64 bit (AMD64)] (64-bit runtime)\r\nPython platform: Windows-10-10.0.18363-SP0\r\nIs CUDA available: True\r\nCUDA runtime version: 10.2.89\r\nGPU models and configuration: Could not collect\r\nNvidia driver version: Could not collect\r\ncuDNN version: C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin\\cudnn64_7.dll\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy==0.910\r\n[pip3] mypy-extensions==0.4.3\r\n[pip3] numpy==1.21.4\r\n[pip3] pytorch-lightning==1.5.5\r\n[pip3] pytorch-memlab==0.2.4\r\n[pip3] torch==1.10.0+cu102\r\n[pip3] torch-geometric==2.0.2\r\n[pip3] torch-scatter==2.0.9\r\n[pip3] torch-sparse==0.6.12\r\n[pip3] torchaudio==0.10.0+cu102\r\n[pip3] torchmetrics==0.6.1\r\n[pip3] torchsummary==1.5.1\r\n[pip3] torchvision==0.11.1+cu102\r\n[conda] libblas 3.9.0 12_win64_mkl conda-forge\r\n[conda] libcblas 3.9.0 12_win64_mkl conda-forge\r\n[conda] liblapack 3.9.0 12_win64_mkl conda-forge\r\n[conda] mkl 2021.4.0 h0e2418a_729 conda-forge\r\n[conda] mypy 0.910 pypi_0 pypi\r\n[conda] mypy-extensions 0.4.3 pypi_0 pypi\r\n[conda] numpy 1.21.4 py38h089cfbf_0 conda-forge\r\n[conda] pytorch-lightning 1.5.5 pypi_0 pypi\r\n[conda] pytorch-memlab 0.2.4 pypi_0 pypi\r\n[conda] torch 1.10.0+cu102 pypi_0 pypi\r\n[conda] torch-geometric 2.0.2 pypi_0 pypi\r\n[conda] torch-scatter 2.0.9 pypi_0 pypi\r\n[conda] torch-sparse 0.6.12 pypi_0 pypi\r\n[conda] torchaudio 0.10.0+cu102 pypi_0 pypi\r\n[conda] torchmetrics 0.6.1 pypi_0 pypi\r\n[conda] torchsummary 1.5.1 pypi_0 pypi\r\n[conda] torchvision 0.11.1+cu102 pypi_0 pypi\n\ncc @pmeier\n", "before_files": [{"content": "import os\nfrom typing import Any, Dict, List, Tuple, Optional, Callable\n\nfrom torch import Tensor\n\nfrom 
.folder import find_classes, make_dataset\nfrom .video_utils import VideoClips\nfrom .vision import VisionDataset\n\n\nclass UCF101(VisionDataset):\n \"\"\"\n `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.\n\n UCF101 is an action recognition video dataset.\n This dataset consider every video as a collection of video clips of fixed size, specified\n by ``frames_per_clip``, where the step in frames between each clip is given by\n ``step_between_clips``. The dataset itself can be downloaded from the dataset website;\n annotations that ``annotation_path`` should be pointing to can be downloaded from `here\n <https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`.\n\n To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``\n and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two\n elements will come from video 1, and the next three elements from video 2.\n Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all\n frames in a video might be present.\n\n Internally, it uses a VideoClips object to handle clip creation.\n\n Args:\n root (string): Root directory of the UCF101 Dataset.\n annotation_path (str): path to the folder containing the split files;\n see docstring above for download instructions of these files\n frames_per_clip (int): number of frames in a clip.\n step_between_clips (int, optional): number of frames between each clip.\n fold (int, optional): which fold to use. Should be between 1 and 3.\n train (bool, optional): if ``True``, creates a dataset from the train split,\n otherwise from the ``test`` split.\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n and returns a transformed version.\n\n Returns:\n tuple: A 3-tuple with the following entries:\n\n - video (Tensor[T, H, W, C]): the `T` video frames\n - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels\n and `L` is the number of points\n - label (int): class of the video clip\n \"\"\"\n\n def __init__(\n self,\n root: str,\n annotation_path: str,\n frames_per_clip: int,\n step_between_clips: int = 1,\n frame_rate: Optional[int] = None,\n fold: int = 1,\n train: bool = True,\n transform: Optional[Callable] = None,\n _precomputed_metadata: Optional[Dict[str, Any]] = None,\n num_workers: int = 1,\n _video_width: int = 0,\n _video_height: int = 0,\n _video_min_dimension: int = 0,\n _audio_samples: int = 0,\n ) -> None:\n super().__init__(root)\n if not 1 <= fold <= 3:\n raise ValueError(f\"fold should be between 1 and 3, got {fold}\")\n\n extensions = (\"avi\",)\n self.fold = fold\n self.train = train\n\n self.classes, class_to_idx = find_classes(self.root)\n self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)\n video_list = [x[0] for x in self.samples]\n video_clips = VideoClips(\n video_list,\n frames_per_clip,\n step_between_clips,\n frame_rate,\n _precomputed_metadata,\n num_workers=num_workers,\n _video_width=_video_width,\n _video_height=_video_height,\n _video_min_dimension=_video_min_dimension,\n _audio_samples=_audio_samples,\n )\n # we bookkeep the full version of video clips because we want to be able\n # to return the meta data of full version rather than the subset version of\n # video clips\n self.full_video_clips = video_clips\n self.indices = self._select_fold(video_list, annotation_path, fold, train)\n self.video_clips = video_clips.subset(self.indices)\n self.transform = 
transform\n\n @property\n def metadata(self) -> Dict[str, Any]:\n return self.full_video_clips.metadata\n\n def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:\n name = \"train\" if train else \"test\"\n name = f\"{name}list{fold:02d}.txt\"\n f = os.path.join(annotation_path, name)\n selected_files = set()\n with open(f) as fid:\n data = fid.readlines()\n data = [x.strip().split(\" \")[0] for x in data]\n data = [os.path.join(self.root, x) for x in data]\n selected_files.update(data)\n indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]\n return indices\n\n def __len__(self) -> int:\n return self.video_clips.num_clips()\n\n def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:\n video, audio, info, video_idx = self.video_clips.get_clip(idx)\n label = self.samples[self.indices[video_idx]][1]\n\n if self.transform is not None:\n video = self.transform(video)\n\n return video, audio, label\n", "path": "torchvision/datasets/ucf101.py"}], "after_files": [{"content": "import os\nfrom typing import Any, Dict, List, Tuple, Optional, Callable\n\nfrom torch import Tensor\n\nfrom .folder import find_classes, make_dataset\nfrom .video_utils import VideoClips\nfrom .vision import VisionDataset\n\n\nclass UCF101(VisionDataset):\n \"\"\"\n `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.\n\n UCF101 is an action recognition video dataset.\n This dataset consider every video as a collection of video clips of fixed size, specified\n by ``frames_per_clip``, where the step in frames between each clip is given by\n ``step_between_clips``. The dataset itself can be downloaded from the dataset website;\n annotations that ``annotation_path`` should be pointing to can be downloaded from `here\n <https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`.\n\n To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``\n and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two\n elements will come from video 1, and the next three elements from video 2.\n Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all\n frames in a video might be present.\n\n Internally, it uses a VideoClips object to handle clip creation.\n\n Args:\n root (string): Root directory of the UCF101 Dataset.\n annotation_path (str): path to the folder containing the split files;\n see docstring above for download instructions of these files\n frames_per_clip (int): number of frames in a clip.\n step_between_clips (int, optional): number of frames between each clip.\n fold (int, optional): which fold to use. 
Should be between 1 and 3.\n train (bool, optional): if ``True``, creates a dataset from the train split,\n otherwise from the ``test`` split.\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n and returns a transformed version.\n\n Returns:\n tuple: A 3-tuple with the following entries:\n\n - video (Tensor[T, H, W, C]): the `T` video frames\n - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels\n and `L` is the number of points\n - label (int): class of the video clip\n \"\"\"\n\n def __init__(\n self,\n root: str,\n annotation_path: str,\n frames_per_clip: int,\n step_between_clips: int = 1,\n frame_rate: Optional[int] = None,\n fold: int = 1,\n train: bool = True,\n transform: Optional[Callable] = None,\n _precomputed_metadata: Optional[Dict[str, Any]] = None,\n num_workers: int = 1,\n _video_width: int = 0,\n _video_height: int = 0,\n _video_min_dimension: int = 0,\n _audio_samples: int = 0,\n ) -> None:\n super().__init__(root)\n if not 1 <= fold <= 3:\n raise ValueError(f\"fold should be between 1 and 3, got {fold}\")\n\n extensions = (\"avi\",)\n self.fold = fold\n self.train = train\n\n self.classes, class_to_idx = find_classes(self.root)\n self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)\n video_list = [x[0] for x in self.samples]\n video_clips = VideoClips(\n video_list,\n frames_per_clip,\n step_between_clips,\n frame_rate,\n _precomputed_metadata,\n num_workers=num_workers,\n _video_width=_video_width,\n _video_height=_video_height,\n _video_min_dimension=_video_min_dimension,\n _audio_samples=_audio_samples,\n )\n # we bookkeep the full version of video clips because we want to be able\n # to return the meta data of full version rather than the subset version of\n # video clips\n self.full_video_clips = video_clips\n self.indices = self._select_fold(video_list, annotation_path, fold, train)\n self.video_clips = video_clips.subset(self.indices)\n self.transform = transform\n\n @property\n def metadata(self) -> Dict[str, Any]:\n return self.full_video_clips.metadata\n\n def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:\n name = \"train\" if train else \"test\"\n name = f\"{name}list{fold:02d}.txt\"\n f = os.path.join(annotation_path, name)\n selected_files = set()\n with open(f) as fid:\n data = fid.readlines()\n data = [x.strip().split(\" \")[0] for x in data]\n data = [os.path.join(self.root, *x.split(\"/\")) for x in data]\n selected_files.update(data)\n indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]\n return indices\n\n def __len__(self) -> int:\n return self.video_clips.num_clips()\n\n def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:\n video, audio, info, video_idx = self.video_clips.get_clip(idx)\n label = self.samples[self.indices[video_idx]][1]\n\n if self.transform is not None:\n video = self.transform(video)\n\n return video, audio, label\n", "path": "torchvision/datasets/ucf101.py"}]}
| 2,946 | 176 |
gh_patches_debug_27014
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-1106
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BatchNormalization does not switch the current GPU automatically
As reported in the forum, the current implementation of BatchNormalization link does not switch the current GPU appropriately during computing the running statistics of batches.
It causes confusing error message like `ValueError: Array device must be same as the current device: array device = 1 while current = 0`.
See for example: https://groups.google.com/forum/#!topic/chainer/T-6s3KD-X-U
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/links/normalization/batch_normalization.py`
Content:
```
1 import numpy
2
3 from chainer.functions.normalization import batch_normalization
4 from chainer import link
5 from chainer import variable
6
7
8 class BatchNormalization(link.Link):
9
10 """Batch normalization layer on outputs of linear or convolution functions.
11
12 This link wraps the :func:`~chainer.functions.batch_normalization` and
13 :func:`~chainer.functions.fixed_batch_normalization` functions.
14
15 It runs in three modes: training mode, fine-tuning mode, and testing mode.
16
17 In training mode, it normalizes the input by *batch statistics*. It also
18 maintains approximated population statistics by moving averages, which can
19 be used for instant evaluation in testing mode.
20
21 In fine-tuning mode, it accumulates the input to compute *population
22 statistics*. In order to correctly compute the population statistics, a
23 user must use this mode to feed mini batches running through whole training
24 dataset.
25
26 In testing mode, it uses pre-computed population statistics to normalize
27 the input variable. The population statistics is approximated if it is
28 computed by training mode, or accurate if it is correctly computed by
29 fine-tuning mode.
30
31 Args:
32 size (int or tuple of ints): Size (or shape) of channel
33 dimensions.
34 decay (float): Decay rate of moving average. It is used on training.
35 eps (float): Epsilon value for numerical stability.
36 dtype (numpy.dtype): Type to use in computing.
37
38 See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
39 Internal Covariate Shift <http://arxiv.org/abs/1502.03167>`_
40
41 .. seealso::
42 :func:`~chainer.functions.batch_normalization`,
43 :func:`~chainer.functions.fixed_batch_normalization`
44
45 Attributes:
46 gamma (~chainer.Variable): Scaling parameter.
47 beta (~chainer.Variable): Shifting parameter.
48 avg_mean (~chainer.Variable): Population mean.
49 avg_var (~chainer.Variable): Population variance.
50 N (int): Count of batches given for fine-tuning.
51 decay (float): Decay rate of moving average. It is used on training.
52 eps (float): Epsilon value for numerical stability. This value is added
53 to the batch variances.
54
55 """
56 def __init__(self, size, decay=0.9, eps=1e-5, dtype=numpy.float32):
57 super(BatchNormalization, self).__init__()
58 self.add_param('gamma', size, dtype=dtype)
59 self.gamma.data.fill(1)
60 self.add_param('beta', size, dtype=dtype)
61 self.beta.data.fill(0)
62 self.add_persistent('avg_mean', numpy.zeros(size, dtype=dtype))
63 self.add_persistent('avg_var', numpy.zeros(size, dtype=dtype))
64 self.add_persistent('N', 0)
65 self.decay = decay
66 self.eps = eps
67
68 def __call__(self, x, test=False, finetune=False):
69 """Invokes the forward propagation of BatchNormalization.
70
71 BatchNormalization accepts additional arguments, which controls three
72 different running mode.
73
74 Args:
75 x (Variable): An input variable.
76 test (bool): If ``True``, BatchNormalization runs in testing mode;
77 it normalizes the input using pre-computed statistics.
78 finetune (bool): If ``True``, BatchNormalization runs in
79 fine-tuning mode; it accumulates the input array to compute
80 population statistics for normalization, and normalizes the
81 input using batch statistics.
82
83 If ``test`` and ``finetune`` are both ``False``, then
84 BatchNormalization runs in training mode; it computes moving averages
85 of mean and variance for evaluation during training, and normalizes the
86 input using batch statistics.
87
88 """
89 use_batch_mean = not test or finetune
90
91 if use_batch_mean:
92 func = batch_normalization.BatchNormalizationFunction(self.eps)
93 ret = func(x, self.gamma, self.beta)
94
95 if finetune:
96 self.N += 1
97 decay = 1. - 1. / self.N
98 else:
99 decay = self.decay
100
101 m = x.data.size // self.gamma.data.size
102 adjust = m / max(m - 1., 1.) # unbiased estimation
103 self.avg_mean *= decay
104 func.mean *= 1 - decay # reuse buffer as a temporary
105 self.avg_mean += func.mean
106 del func.mean
107 self.avg_var *= decay
108 func.var *= (1 - decay) * adjust # reuse buffer as a temporary
109 self.avg_var += func.var
110 del func.var
111 else:
112 mean = variable.Variable(self.avg_mean, volatile='auto')
113 var = variable.Variable(self.avg_var, volatile='auto')
114 ret = batch_normalization.fixed_batch_normalization(
115 x, self.gamma, self.beta, mean, var, self.eps)
116 return ret
117
118 def start_finetuning(self):
119 """Resets the population count for collecting population statistics.
120
121 This method can be skipped if it is the first time to use the
122 fine-tuning mode. Otherwise, this method should be called before
123 starting the fine-tuning mode again.
124
125 """
126 self.N = 0
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/links/normalization/batch_normalization.py b/chainer/links/normalization/batch_normalization.py
--- a/chainer/links/normalization/batch_normalization.py
+++ b/chainer/links/normalization/batch_normalization.py
@@ -1,5 +1,6 @@
import numpy
+from chainer import cuda
from chainer.functions.normalization import batch_normalization
from chainer import link
from chainer import variable
@@ -98,16 +99,17 @@
else:
decay = self.decay
- m = x.data.size // self.gamma.data.size
- adjust = m / max(m - 1., 1.) # unbiased estimation
- self.avg_mean *= decay
- func.mean *= 1 - decay # reuse buffer as a temporary
- self.avg_mean += func.mean
- del func.mean
- self.avg_var *= decay
- func.var *= (1 - decay) * adjust # reuse buffer as a temporary
- self.avg_var += func.var
- del func.var
+ with cuda.get_device(x.data):
+ m = x.data.size // self.gamma.data.size
+ adjust = m / max(m - 1., 1.) # unbiased estimation
+ self.avg_mean *= decay
+ func.mean *= 1 - decay # reuse buffer as a temporary
+ self.avg_mean += func.mean
+ del func.mean
+ self.avg_var *= decay
+ func.var *= (1 - decay) * adjust # reuse buffer as a temporary
+ self.avg_var += func.var
+ del func.var
else:
mean = variable.Variable(self.avg_mean, volatile='auto')
var = variable.Variable(self.avg_var, volatile='auto')
|
{"golden_diff": "diff --git a/chainer/links/normalization/batch_normalization.py b/chainer/links/normalization/batch_normalization.py\n--- a/chainer/links/normalization/batch_normalization.py\n+++ b/chainer/links/normalization/batch_normalization.py\n@@ -1,5 +1,6 @@\n import numpy\n \n+from chainer import cuda\n from chainer.functions.normalization import batch_normalization\n from chainer import link\n from chainer import variable\n@@ -98,16 +99,17 @@\n else:\n decay = self.decay\n \n- m = x.data.size // self.gamma.data.size\n- adjust = m / max(m - 1., 1.) # unbiased estimation\n- self.avg_mean *= decay\n- func.mean *= 1 - decay # reuse buffer as a temporary\n- self.avg_mean += func.mean\n- del func.mean\n- self.avg_var *= decay\n- func.var *= (1 - decay) * adjust # reuse buffer as a temporary\n- self.avg_var += func.var\n- del func.var\n+ with cuda.get_device(x.data):\n+ m = x.data.size // self.gamma.data.size\n+ adjust = m / max(m - 1., 1.) # unbiased estimation\n+ self.avg_mean *= decay\n+ func.mean *= 1 - decay # reuse buffer as a temporary\n+ self.avg_mean += func.mean\n+ del func.mean\n+ self.avg_var *= decay\n+ func.var *= (1 - decay) * adjust # reuse buffer as a temporary\n+ self.avg_var += func.var\n+ del func.var\n else:\n mean = variable.Variable(self.avg_mean, volatile='auto')\n var = variable.Variable(self.avg_var, volatile='auto')\n", "issue": "BatchNormalization does not switch the current GPU automatically\nAs reported in the forum, the current implementation of BatchNormalization link does not switch the current GPU appropriately during computing the running statistics of batches.\nIt causes confusing error message like `ValueError: Array device must be same as the current device: array device = 1 while current = 0`.\nSee for example: https://groups.google.com/forum/#!topic/chainer/T-6s3KD-X-U\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer.functions.normalization import batch_normalization\nfrom chainer import link\nfrom chainer import variable\n\n\nclass BatchNormalization(link.Link):\n\n \"\"\"Batch normalization layer on outputs of linear or convolution functions.\n\n This link wraps the :func:`~chainer.functions.batch_normalization` and\n :func:`~chainer.functions.fixed_batch_normalization` functions.\n\n It runs in three modes: training mode, fine-tuning mode, and testing mode.\n\n In training mode, it normalizes the input by *batch statistics*. It also\n maintains approximated population statistics by moving averages, which can\n be used for instant evaluation in testing mode.\n\n In fine-tuning mode, it accumulates the input to compute *population\n statistics*. In order to correctly compute the population statistics, a\n user must use this mode to feed mini batches running through whole training\n dataset.\n\n In testing mode, it uses pre-computed population statistics to normalize\n the input variable. The population statistics is approximated if it is\n computed by training mode, or accurate if it is correctly computed by\n fine-tuning mode.\n\n Args:\n size (int or tuple of ints): Size (or shape) of channel\n dimensions.\n decay (float): Decay rate of moving average. It is used on training.\n eps (float): Epsilon value for numerical stability.\n dtype (numpy.dtype): Type to use in computing.\n\n See: `Batch Normalization: Accelerating Deep Network Training by Reducing\\\n Internal Covariate Shift <http://arxiv.org/abs/1502.03167>`_\n\n .. 
seealso::\n :func:`~chainer.functions.batch_normalization`,\n :func:`~chainer.functions.fixed_batch_normalization`\n\n Attributes:\n gamma (~chainer.Variable): Scaling parameter.\n beta (~chainer.Variable): Shifting parameter.\n avg_mean (~chainer.Variable): Population mean.\n avg_var (~chainer.Variable): Population variance.\n N (int): Count of batches given for fine-tuning.\n decay (float): Decay rate of moving average. It is used on training.\n eps (float): Epsilon value for numerical stability. This value is added\n to the batch variances.\n\n \"\"\"\n def __init__(self, size, decay=0.9, eps=1e-5, dtype=numpy.float32):\n super(BatchNormalization, self).__init__()\n self.add_param('gamma', size, dtype=dtype)\n self.gamma.data.fill(1)\n self.add_param('beta', size, dtype=dtype)\n self.beta.data.fill(0)\n self.add_persistent('avg_mean', numpy.zeros(size, dtype=dtype))\n self.add_persistent('avg_var', numpy.zeros(size, dtype=dtype))\n self.add_persistent('N', 0)\n self.decay = decay\n self.eps = eps\n\n def __call__(self, x, test=False, finetune=False):\n \"\"\"Invokes the forward propagation of BatchNormalization.\n\n BatchNormalization accepts additional arguments, which controls three\n different running mode.\n\n Args:\n x (Variable): An input variable.\n test (bool): If ``True``, BatchNormalization runs in testing mode;\n it normalizes the input using pre-computed statistics.\n finetune (bool): If ``True``, BatchNormalization runs in\n fine-tuning mode; it accumulates the input array to compute\n population statistics for normalization, and normalizes the\n input using batch statistics.\n\n If ``test`` and ``finetune`` are both ``False``, then\n BatchNormalization runs in training mode; it computes moving averages\n of mean and variance for evaluation during training, and normalizes the\n input using batch statistics.\n\n \"\"\"\n use_batch_mean = not test or finetune\n\n if use_batch_mean:\n func = batch_normalization.BatchNormalizationFunction(self.eps)\n ret = func(x, self.gamma, self.beta)\n\n if finetune:\n self.N += 1\n decay = 1. - 1. / self.N\n else:\n decay = self.decay\n\n m = x.data.size // self.gamma.data.size\n adjust = m / max(m - 1., 1.) # unbiased estimation\n self.avg_mean *= decay\n func.mean *= 1 - decay # reuse buffer as a temporary\n self.avg_mean += func.mean\n del func.mean\n self.avg_var *= decay\n func.var *= (1 - decay) * adjust # reuse buffer as a temporary\n self.avg_var += func.var\n del func.var\n else:\n mean = variable.Variable(self.avg_mean, volatile='auto')\n var = variable.Variable(self.avg_var, volatile='auto')\n ret = batch_normalization.fixed_batch_normalization(\n x, self.gamma, self.beta, mean, var, self.eps)\n return ret\n\n def start_finetuning(self):\n \"\"\"Resets the population count for collecting population statistics.\n\n This method can be skipped if it is the first time to use the\n fine-tuning mode. 
Otherwise, this method should be called before\n starting the fine-tuning mode again.\n\n \"\"\"\n self.N = 0\n", "path": "chainer/links/normalization/batch_normalization.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer.functions.normalization import batch_normalization\nfrom chainer import link\nfrom chainer import variable\n\n\nclass BatchNormalization(link.Link):\n\n \"\"\"Batch normalization layer on outputs of linear or convolution functions.\n\n This link wraps the :func:`~chainer.functions.batch_normalization` and\n :func:`~chainer.functions.fixed_batch_normalization` functions.\n\n It runs in three modes: training mode, fine-tuning mode, and testing mode.\n\n In training mode, it normalizes the input by *batch statistics*. It also\n maintains approximated population statistics by moving averages, which can\n be used for instant evaluation in testing mode.\n\n In fine-tuning mode, it accumulates the input to compute *population\n statistics*. In order to correctly compute the population statistics, a\n user must use this mode to feed mini batches running through whole training\n dataset.\n\n In testing mode, it uses pre-computed population statistics to normalize\n the input variable. The population statistics is approximated if it is\n computed by training mode, or accurate if it is correctly computed by\n fine-tuning mode.\n\n Args:\n size (int or tuple of ints): Size (or shape) of channel\n dimensions.\n decay (float): Decay rate of moving average. It is used on training.\n eps (float): Epsilon value for numerical stability.\n dtype (numpy.dtype): Type to use in computing.\n\n See: `Batch Normalization: Accelerating Deep Network Training by Reducing\\\n Internal Covariate Shift <http://arxiv.org/abs/1502.03167>`_\n\n .. seealso::\n :func:`~chainer.functions.batch_normalization`,\n :func:`~chainer.functions.fixed_batch_normalization`\n\n Attributes:\n gamma (~chainer.Variable): Scaling parameter.\n beta (~chainer.Variable): Shifting parameter.\n avg_mean (~chainer.Variable): Population mean.\n avg_var (~chainer.Variable): Population variance.\n N (int): Count of batches given for fine-tuning.\n decay (float): Decay rate of moving average. It is used on training.\n eps (float): Epsilon value for numerical stability. 
This value is added\n to the batch variances.\n\n \"\"\"\n def __init__(self, size, decay=0.9, eps=1e-5, dtype=numpy.float32):\n super(BatchNormalization, self).__init__()\n self.add_param('gamma', size, dtype=dtype)\n self.gamma.data.fill(1)\n self.add_param('beta', size, dtype=dtype)\n self.beta.data.fill(0)\n self.add_persistent('avg_mean', numpy.zeros(size, dtype=dtype))\n self.add_persistent('avg_var', numpy.zeros(size, dtype=dtype))\n self.add_persistent('N', 0)\n self.decay = decay\n self.eps = eps\n\n def __call__(self, x, test=False, finetune=False):\n \"\"\"Invokes the forward propagation of BatchNormalization.\n\n BatchNormalization accepts additional arguments, which controls three\n different running mode.\n\n Args:\n x (Variable): An input variable.\n test (bool): If ``True``, BatchNormalization runs in testing mode;\n it normalizes the input using pre-computed statistics.\n finetune (bool): If ``True``, BatchNormalization runs in\n fine-tuning mode; it accumulates the input array to compute\n population statistics for normalization, and normalizes the\n input using batch statistics.\n\n If ``test`` and ``finetune`` are both ``False``, then\n BatchNormalization runs in training mode; it computes moving averages\n of mean and variance for evaluation during training, and normalizes the\n input using batch statistics.\n\n \"\"\"\n use_batch_mean = not test or finetune\n\n if use_batch_mean:\n func = batch_normalization.BatchNormalizationFunction(self.eps)\n ret = func(x, self.gamma, self.beta)\n\n if finetune:\n self.N += 1\n decay = 1. - 1. / self.N\n else:\n decay = self.decay\n\n with cuda.get_device(x.data):\n m = x.data.size // self.gamma.data.size\n adjust = m / max(m - 1., 1.) # unbiased estimation\n self.avg_mean *= decay\n func.mean *= 1 - decay # reuse buffer as a temporary\n self.avg_mean += func.mean\n del func.mean\n self.avg_var *= decay\n func.var *= (1 - decay) * adjust # reuse buffer as a temporary\n self.avg_var += func.var\n del func.var\n else:\n mean = variable.Variable(self.avg_mean, volatile='auto')\n var = variable.Variable(self.avg_var, volatile='auto')\n ret = batch_normalization.fixed_batch_normalization(\n x, self.gamma, self.beta, mean, var, self.eps)\n return ret\n\n def start_finetuning(self):\n \"\"\"Resets the population count for collecting population statistics.\n\n This method can be skipped if it is the first time to use the\n fine-tuning mode. Otherwise, this method should be called before\n starting the fine-tuning mode again.\n\n \"\"\"\n self.N = 0\n", "path": "chainer/links/normalization/batch_normalization.py"}]}
| 1,770 | 397 |
gh_patches_debug_31987
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-142
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Визуализировать результаты батлов
<img width="1113" alt="image" src="https://user-images.githubusercontent.com/19980512/81127819-1f135780-8f48-11ea-83bc-7c56e6e849e4.png">
Было бы круто как-то визуализировать полоску в зависимости от результатов баттла. Чтобы такой раз — и увидел результат, а не подсчитывал, где больше аргументов и плюсов
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `posts/templatetags/battle.py`
Content:
```
1 from django import template
2 from django.template import loader
3
4 register = template.Library()
5
6
7 battle_stats_template = loader.get_template("posts/widgets/battle_stats.html")
8
9
10 def _is_argument_for_side(comment, side):
11 for_side = comment.metadata and comment.metadata.get("battle", {}).get("side") == side
12
13 return not comment.is_deleted and not comment.reply_to_id and for_side
14
15
16 @register.simple_tag()
17 def battle_stats(post, comments):
18 arguments_for_a = [c for c in comments if _is_argument_for_side(c, "a")]
19 arguments_for_b = [c for c in comments if _is_argument_for_side(c, "b")]
20
21 total_votes_a = sum(c.upvotes for c in arguments_for_a)
22 total_votes_b = sum(c.upvotes for c in arguments_for_b)
23 return battle_stats_template.render({
24 "total_arguments": {
25 "a": len(arguments_for_a),
26 "b": len(arguments_for_b),
27 },
28 "total_votes": {
29 "a": total_votes_a,
30 "b": total_votes_b,
31 },
32 "battle": post,
33 })
34
35
36 @register.filter()
37 def side_name(battle, side_code):
38 if battle and battle.metadata and battle.metadata.get("battle"):
39 return battle.metadata["battle"]["sides"][side_code]["name"]
40 return ""
41
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/posts/templatetags/battle.py b/posts/templatetags/battle.py
--- a/posts/templatetags/battle.py
+++ b/posts/templatetags/battle.py
@@ -3,7 +3,6 @@
register = template.Library()
-
battle_stats_template = loader.get_template("posts/widgets/battle_stats.html")
@@ -20,6 +19,7 @@
total_votes_a = sum(c.upvotes for c in arguments_for_a)
total_votes_b = sum(c.upvotes for c in arguments_for_b)
+
return battle_stats_template.render({
"total_arguments": {
"a": len(arguments_for_a),
@@ -29,6 +29,7 @@
"a": total_votes_a,
"b": total_votes_b,
},
+ "graph": graph_percentages(len(arguments_for_a), len(arguments_for_b), total_votes_a, total_votes_b),
"battle": post,
})
@@ -39,3 +40,27 @@
return battle.metadata["battle"]["sides"][side_code]["name"]
return ""
+
+def graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int):
+ """Counts percentages for battle graph
+
+ Percentage for a side is a rounded up arithmetic average of side's argument and upvote percentages
+
+ For each side: (argument % of total arguments amount + vote % of total votes amount ) / 2
+ """
+ percent_a = 0
+ percent_b = 0
+ total_arguments = a_arguments + b_arguments
+ total_upvotes = a_votes + b_votes
+ if total_arguments > 0:
+ argument_percent = 100 / total_arguments
+ percent_a = a_arguments * argument_percent
+ percent_b = b_arguments * argument_percent
+ if total_upvotes > 0:
+ upvote_percent = 100 / total_upvotes
+ percent_a = (percent_a + a_votes * upvote_percent) / 2
+ percent_b = (percent_b + b_votes * upvote_percent) / 2
+ return {
+ "percent_a": round(percent_a),
+ "percent_b": round(percent_b)
+ }
|
{"golden_diff": "diff --git a/posts/templatetags/battle.py b/posts/templatetags/battle.py\n--- a/posts/templatetags/battle.py\n+++ b/posts/templatetags/battle.py\n@@ -3,7 +3,6 @@\n \n register = template.Library()\n \n-\n battle_stats_template = loader.get_template(\"posts/widgets/battle_stats.html\")\n \n \n@@ -20,6 +19,7 @@\n \n total_votes_a = sum(c.upvotes for c in arguments_for_a)\n total_votes_b = sum(c.upvotes for c in arguments_for_b)\n+\n return battle_stats_template.render({\n \"total_arguments\": {\n \"a\": len(arguments_for_a),\n@@ -29,6 +29,7 @@\n \"a\": total_votes_a,\n \"b\": total_votes_b,\n },\n+ \"graph\": graph_percentages(len(arguments_for_a), len(arguments_for_b), total_votes_a, total_votes_b),\n \"battle\": post,\n })\n \n@@ -39,3 +40,27 @@\n return battle.metadata[\"battle\"][\"sides\"][side_code][\"name\"]\n return \"\"\n \n+\n+def graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int):\n+ \"\"\"Counts percentages for battle graph\n+\n+ Percentage for a side is a rounded up arithmetic average of side's argument and upvote percentages\n+\n+ For each side: (argument % of total arguments amount + vote % of total votes amount ) / 2\n+ \"\"\"\n+ percent_a = 0\n+ percent_b = 0\n+ total_arguments = a_arguments + b_arguments\n+ total_upvotes = a_votes + b_votes\n+ if total_arguments > 0:\n+ argument_percent = 100 / total_arguments\n+ percent_a = a_arguments * argument_percent\n+ percent_b = b_arguments * argument_percent\n+ if total_upvotes > 0:\n+ upvote_percent = 100 / total_upvotes\n+ percent_a = (percent_a + a_votes * upvote_percent) / 2\n+ percent_b = (percent_b + b_votes * upvote_percent) / 2\n+ return {\n+ \"percent_a\": round(percent_a),\n+ \"percent_b\": round(percent_b)\n+ }\n", "issue": "\u0412\u0438\u0437\u0443\u0430\u043b\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u0431\u0430\u0442\u043b\u043e\u0432\n<img width=\"1113\" alt=\"image\" src=\"https://user-images.githubusercontent.com/19980512/81127819-1f135780-8f48-11ea-83bc-7c56e6e849e4.png\">\r\n\r\n\u0411\u044b\u043b\u043e \u0431\u044b \u043a\u0440\u0443\u0442\u043e \u043a\u0430\u043a-\u0442\u043e \u0432\u0438\u0437\u0443\u0430\u043b\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u043f\u043e\u043b\u043e\u0441\u043a\u0443 \u0432 \u0437\u0430\u0432\u0438\u0441\u0438\u043c\u043e\u0441\u0442\u0438 \u043e\u0442 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u043e\u0432 \u0431\u0430\u0442\u0442\u043b\u0430. 
\u0427\u0442\u043e\u0431\u044b \u0442\u0430\u043a\u043e\u0439 \u0440\u0430\u0437 \u2014 \u0438 \u0443\u0432\u0438\u0434\u0435\u043b \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442, \u0430 \u043d\u0435 \u043f\u043e\u0434\u0441\u0447\u0438\u0442\u044b\u0432\u0430\u043b, \u0433\u0434\u0435 \u0431\u043e\u043b\u044c\u0448\u0435 \u0430\u0440\u0433\u0443\u043c\u0435\u043d\u0442\u043e\u0432 \u0438 \u043f\u043b\u044e\u0441\u043e\u0432\n", "before_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\nbattle_stats_template = loader.get_template(\"posts/widgets/battle_stats.html\")\n\n\ndef _is_argument_for_side(comment, side):\n for_side = comment.metadata and comment.metadata.get(\"battle\", {}).get(\"side\") == side\n\n return not comment.is_deleted and not comment.reply_to_id and for_side\n\n\[email protected]_tag()\ndef battle_stats(post, comments):\n arguments_for_a = [c for c in comments if _is_argument_for_side(c, \"a\")]\n arguments_for_b = [c for c in comments if _is_argument_for_side(c, \"b\")]\n\n total_votes_a = sum(c.upvotes for c in arguments_for_a)\n total_votes_b = sum(c.upvotes for c in arguments_for_b)\n return battle_stats_template.render({\n \"total_arguments\": {\n \"a\": len(arguments_for_a),\n \"b\": len(arguments_for_b),\n },\n \"total_votes\": {\n \"a\": total_votes_a,\n \"b\": total_votes_b,\n },\n \"battle\": post,\n })\n\n\[email protected]()\ndef side_name(battle, side_code):\n if battle and battle.metadata and battle.metadata.get(\"battle\"):\n return battle.metadata[\"battle\"][\"sides\"][side_code][\"name\"]\n return \"\"\n\n", "path": "posts/templatetags/battle.py"}], "after_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\nbattle_stats_template = loader.get_template(\"posts/widgets/battle_stats.html\")\n\n\ndef _is_argument_for_side(comment, side):\n for_side = comment.metadata and comment.metadata.get(\"battle\", {}).get(\"side\") == side\n\n return not comment.is_deleted and not comment.reply_to_id and for_side\n\n\[email protected]_tag()\ndef battle_stats(post, comments):\n arguments_for_a = [c for c in comments if _is_argument_for_side(c, \"a\")]\n arguments_for_b = [c for c in comments if _is_argument_for_side(c, \"b\")]\n\n total_votes_a = sum(c.upvotes for c in arguments_for_a)\n total_votes_b = sum(c.upvotes for c in arguments_for_b)\n\n return battle_stats_template.render({\n \"total_arguments\": {\n \"a\": len(arguments_for_a),\n \"b\": len(arguments_for_b),\n },\n \"total_votes\": {\n \"a\": total_votes_a,\n \"b\": total_votes_b,\n },\n \"graph\": graph_percentages(len(arguments_for_a), len(arguments_for_b), total_votes_a, total_votes_b),\n \"battle\": post,\n })\n\n\[email protected]()\ndef side_name(battle, side_code):\n if battle and battle.metadata and battle.metadata.get(\"battle\"):\n return battle.metadata[\"battle\"][\"sides\"][side_code][\"name\"]\n return \"\"\n\n\ndef graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int):\n \"\"\"Counts percentages for battle graph\n\n Percentage for a side is a rounded up arithmetic average of side's argument and upvote percentages\n\n For each side: (argument % of total arguments amount + vote % of total votes amount ) / 2\n \"\"\"\n percent_a = 0\n percent_b = 0\n total_arguments = a_arguments + b_arguments\n total_upvotes = a_votes + b_votes\n if total_arguments > 0:\n argument_percent = 100 / total_arguments\n percent_a = a_arguments 
* argument_percent\n percent_b = b_arguments * argument_percent\n if total_upvotes > 0:\n upvote_percent = 100 / total_upvotes\n percent_a = (percent_a + a_votes * upvote_percent) / 2\n percent_b = (percent_b + b_votes * upvote_percent) / 2\n return {\n \"percent_a\": round(percent_a),\n \"percent_b\": round(percent_b)\n }\n", "path": "posts/templatetags/battle.py"}]}
| 767 | 508 |
gh_patches_debug_16079
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-1277
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Passing destinationExpirationTime while creating table snapshot
Hi,
Lib: google-cloud-bigquery==2.32.0
I am trying to create a table snapshot but am not finding any property in the CopyJob config where I can pass the destinationExpiration time.
I tried passing the expiration on the destination table but it didn't work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/job/copy_.py`
Content:
```
1 # Copyright 2015 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Classes for copy jobs."""
16
17 from typing import Optional
18
19 from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
20 from google.cloud.bigquery import _helpers
21 from google.cloud.bigquery.table import TableReference
22
23 from google.cloud.bigquery.job.base import _AsyncJob
24 from google.cloud.bigquery.job.base import _JobConfig
25 from google.cloud.bigquery.job.base import _JobReference
26
27
28 class OperationType:
29 """Different operation types supported in table copy job.
30
31 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#operationtype
32 """
33
34 OPERATION_TYPE_UNSPECIFIED = "OPERATION_TYPE_UNSPECIFIED"
35 """Unspecified operation type."""
36
37 COPY = "COPY"
38 """The source and destination table have the same table type."""
39
40 SNAPSHOT = "SNAPSHOT"
41 """The source table type is TABLE and the destination table type is SNAPSHOT."""
42
43 CLONE = "CLONE"
44 """The source table type is TABLE and the destination table type is CLONE."""
45
46 RESTORE = "RESTORE"
47 """The source table type is SNAPSHOT and the destination table type is TABLE."""
48
49
50 class CopyJobConfig(_JobConfig):
51 """Configuration options for copy jobs.
52
53 All properties in this class are optional. Values which are :data:`None` ->
54 server defaults. Set properties on the constructed configuration by using
55 the property name as the name of a keyword argument.
56 """
57
58 def __init__(self, **kwargs) -> None:
59 super(CopyJobConfig, self).__init__("copy", **kwargs)
60
61 @property
62 def create_disposition(self):
63 """google.cloud.bigquery.job.CreateDisposition: Specifies behavior
64 for creating tables.
65
66 See
67 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.create_disposition
68 """
69 return self._get_sub_prop("createDisposition")
70
71 @create_disposition.setter
72 def create_disposition(self, value):
73 self._set_sub_prop("createDisposition", value)
74
75 @property
76 def write_disposition(self):
77 """google.cloud.bigquery.job.WriteDisposition: Action that occurs if
78 the destination table already exists.
79
80 See
81 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.write_disposition
82 """
83 return self._get_sub_prop("writeDisposition")
84
85 @write_disposition.setter
86 def write_disposition(self, value):
87 self._set_sub_prop("writeDisposition", value)
88
89 @property
90 def destination_encryption_configuration(self):
91 """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
92 encryption configuration for the destination table.
93
94 Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
95 if using default encryption.
96
97 See
98 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_encryption_configuration
99 """
100 prop = self._get_sub_prop("destinationEncryptionConfiguration")
101 if prop is not None:
102 prop = EncryptionConfiguration.from_api_repr(prop)
103 return prop
104
105 @destination_encryption_configuration.setter
106 def destination_encryption_configuration(self, value):
107 api_repr = value
108 if value is not None:
109 api_repr = value.to_api_repr()
110 self._set_sub_prop("destinationEncryptionConfiguration", api_repr)
111
112 @property
113 def operation_type(self) -> str:
114 """The operation to perform with this copy job.
115
116 See
117 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.operation_type
118 """
119 return self._get_sub_prop(
120 "operationType", OperationType.OPERATION_TYPE_UNSPECIFIED
121 )
122
123 @operation_type.setter
124 def operation_type(self, value: Optional[str]):
125 if value is None:
126 value = OperationType.OPERATION_TYPE_UNSPECIFIED
127 self._set_sub_prop("operationType", value)
128
129
130 class CopyJob(_AsyncJob):
131 """Asynchronous job: copy data into a table from other tables.
132
133 Args:
134 job_id (str): the job's ID, within the project belonging to ``client``.
135
136 sources (List[google.cloud.bigquery.table.TableReference]): Table from which data is to be loaded.
137
138 destination (google.cloud.bigquery.table.TableReference): Table into which data is to be loaded.
139
140 client (google.cloud.bigquery.client.Client):
141 A client which holds credentials and project configuration
142 for the dataset (which requires a project).
143
144 job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):
145 Extra configuration options for the copy job.
146 """
147
148 _JOB_TYPE = "copy"
149
150 def __init__(self, job_id, sources, destination, client, job_config=None):
151 super(CopyJob, self).__init__(job_id, client)
152
153 if not job_config:
154 job_config = CopyJobConfig()
155
156 self._configuration = job_config
157 self._properties["configuration"] = job_config._properties
158
159 if destination:
160 _helpers._set_sub_prop(
161 self._properties,
162 ["configuration", "copy", "destinationTable"],
163 destination.to_api_repr(),
164 )
165
166 if sources:
167 source_resources = [source.to_api_repr() for source in sources]
168 _helpers._set_sub_prop(
169 self._properties,
170 ["configuration", "copy", "sourceTables"],
171 source_resources,
172 )
173
174 @property
175 def destination(self):
176 """google.cloud.bigquery.table.TableReference: Table into which data
177 is to be loaded.
178 """
179 return TableReference.from_api_repr(
180 _helpers._get_sub_prop(
181 self._properties, ["configuration", "copy", "destinationTable"]
182 )
183 )
184
185 @property
186 def sources(self):
187 """List[google.cloud.bigquery.table.TableReference]): Table(s) from
188 which data is to be loaded.
189 """
190 source_configs = _helpers._get_sub_prop(
191 self._properties, ["configuration", "copy", "sourceTables"]
192 )
193 if source_configs is None:
194 single = _helpers._get_sub_prop(
195 self._properties, ["configuration", "copy", "sourceTable"]
196 )
197 if single is None:
198 raise KeyError("Resource missing 'sourceTables' / 'sourceTable'")
199 source_configs = [single]
200
201 sources = []
202 for source_config in source_configs:
203 table_ref = TableReference.from_api_repr(source_config)
204 sources.append(table_ref)
205 return sources
206
207 @property
208 def create_disposition(self):
209 """See
210 :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.
211 """
212 return self._configuration.create_disposition
213
214 @property
215 def write_disposition(self):
216 """See
217 :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.
218 """
219 return self._configuration.write_disposition
220
221 @property
222 def destination_encryption_configuration(self):
223 """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
224 encryption configuration for the destination table.
225
226 Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
227 if using default encryption.
228
229 See
230 :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.
231 """
232 return self._configuration.destination_encryption_configuration
233
234 def to_api_repr(self):
235 """Generate a resource for :meth:`_begin`."""
236 # Exclude statistics, if set.
237 return {
238 "jobReference": self._properties["jobReference"],
239 "configuration": self._properties["configuration"],
240 }
241
242 @classmethod
243 def from_api_repr(cls, resource, client):
244 """Factory: construct a job given its API representation
245
246 .. note::
247
248 This method assumes that the project found in the resource matches
249 the client's project.
250
251 Args:
252 resource (Dict): dataset job representation returned from the API
253 client (google.cloud.bigquery.client.Client):
254 Client which holds credentials and project
255 configuration for the dataset.
256
257 Returns:
258 google.cloud.bigquery.job.CopyJob: Job parsed from ``resource``.
259 """
260 cls._check_resource_config(resource)
261 job_ref = _JobReference._from_api_repr(resource["jobReference"])
262 job = cls(job_ref, None, None, client=client)
263 job._set_properties(resource)
264 return job
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/cloud/bigquery/job/copy_.py b/google/cloud/bigquery/job/copy_.py
--- a/google/cloud/bigquery/job/copy_.py
+++ b/google/cloud/bigquery/job/copy_.py
@@ -126,6 +126,20 @@
value = OperationType.OPERATION_TYPE_UNSPECIFIED
self._set_sub_prop("operationType", value)
+ @property
+ def destination_expiration_time(self) -> str:
+ """google.cloud.bigquery.job.DestinationExpirationTime: The time when the
+ destination table expires. Expired tables will be deleted and their storage reclaimed.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_expiration_time
+ """
+ return self._get_sub_prop("destinationExpirationTime")
+
+ @destination_expiration_time.setter
+ def destination_expiration_time(self, value: str):
+ self._set_sub_prop("destinationExpirationTime", value)
+
class CopyJob(_AsyncJob):
"""Asynchronous job: copy data into a table from other tables.
|
{"golden_diff": "diff --git a/google/cloud/bigquery/job/copy_.py b/google/cloud/bigquery/job/copy_.py\n--- a/google/cloud/bigquery/job/copy_.py\n+++ b/google/cloud/bigquery/job/copy_.py\n@@ -126,6 +126,20 @@\n value = OperationType.OPERATION_TYPE_UNSPECIFIED\n self._set_sub_prop(\"operationType\", value)\n \n+ @property\n+ def destination_expiration_time(self) -> str:\n+ \"\"\"google.cloud.bigquery.job.DestinationExpirationTime: The time when the\n+ destination table expires. Expired tables will be deleted and their storage reclaimed.\n+\n+ See\n+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_expiration_time\n+ \"\"\"\n+ return self._get_sub_prop(\"destinationExpirationTime\")\n+\n+ @destination_expiration_time.setter\n+ def destination_expiration_time(self, value: str):\n+ self._set_sub_prop(\"destinationExpirationTime\", value)\n+\n \n class CopyJob(_AsyncJob):\n \"\"\"Asynchronous job: copy data into a table from other tables.\n", "issue": "Passing destinationExpirationTime while creating table snapshot\nHi, \r\n\r\nLib: google-cloud-bigquery==2.32.0\r\n\r\nI am trying to create table snapshot but not finding any properties in CopyJob config where i can pass the destintionExpiration time.\r\nI tried passing the expire at destination table but it didn't worked.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for copy jobs.\"\"\"\n\nfrom typing import Optional\n\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery.table import TableReference\n\nfrom google.cloud.bigquery.job.base import _AsyncJob\nfrom google.cloud.bigquery.job.base import _JobConfig\nfrom google.cloud.bigquery.job.base import _JobReference\n\n\nclass OperationType:\n \"\"\"Different operation types supported in table copy job.\n\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#operationtype\n \"\"\"\n\n OPERATION_TYPE_UNSPECIFIED = \"OPERATION_TYPE_UNSPECIFIED\"\n \"\"\"Unspecified operation type.\"\"\"\n\n COPY = \"COPY\"\n \"\"\"The source and destination table have the same table type.\"\"\"\n\n SNAPSHOT = \"SNAPSHOT\"\n \"\"\"The source table type is TABLE and the destination table type is SNAPSHOT.\"\"\"\n\n CLONE = \"CLONE\"\n \"\"\"The source table type is TABLE and the destination table type is CLONE.\"\"\"\n\n RESTORE = \"RESTORE\"\n \"\"\"The source table type is SNAPSHOT and the destination table type is TABLE.\"\"\"\n\n\nclass CopyJobConfig(_JobConfig):\n \"\"\"Configuration options for copy jobs.\n\n All properties in this class are optional. Values which are :data:`None` ->\n server defaults. 
Set properties on the constructed configuration by using\n the property name as the name of a keyword argument.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n super(CopyJobConfig, self).__init__(\"copy\", **kwargs)\n\n @property\n def create_disposition(self):\n \"\"\"google.cloud.bigquery.job.CreateDisposition: Specifies behavior\n for creating tables.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.create_disposition\n \"\"\"\n return self._get_sub_prop(\"createDisposition\")\n\n @create_disposition.setter\n def create_disposition(self, value):\n self._set_sub_prop(\"createDisposition\", value)\n\n @property\n def write_disposition(self):\n \"\"\"google.cloud.bigquery.job.WriteDisposition: Action that occurs if\n the destination table already exists.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.write_disposition\n \"\"\"\n return self._get_sub_prop(\"writeDisposition\")\n\n @write_disposition.setter\n def write_disposition(self, value):\n self._set_sub_prop(\"writeDisposition\", value)\n\n @property\n def destination_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the destination table.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_encryption_configuration\n \"\"\"\n prop = self._get_sub_prop(\"destinationEncryptionConfiguration\")\n if prop is not None:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @destination_encryption_configuration.setter\n def destination_encryption_configuration(self, value):\n api_repr = value\n if value is not None:\n api_repr = value.to_api_repr()\n self._set_sub_prop(\"destinationEncryptionConfiguration\", api_repr)\n\n @property\n def operation_type(self) -> str:\n \"\"\"The operation to perform with this copy job.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.operation_type\n \"\"\"\n return self._get_sub_prop(\n \"operationType\", OperationType.OPERATION_TYPE_UNSPECIFIED\n )\n\n @operation_type.setter\n def operation_type(self, value: Optional[str]):\n if value is None:\n value = OperationType.OPERATION_TYPE_UNSPECIFIED\n self._set_sub_prop(\"operationType\", value)\n\n\nclass CopyJob(_AsyncJob):\n \"\"\"Asynchronous job: copy data into a table from other tables.\n\n Args:\n job_id (str): the job's ID, within the project belonging to ``client``.\n\n sources (List[google.cloud.bigquery.table.TableReference]): Table from which data is to be loaded.\n\n destination (google.cloud.bigquery.table.TableReference): Table into which data is to be loaded.\n\n client (google.cloud.bigquery.client.Client):\n A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):\n Extra configuration options for the copy job.\n \"\"\"\n\n _JOB_TYPE = \"copy\"\n\n def __init__(self, job_id, sources, destination, client, job_config=None):\n super(CopyJob, self).__init__(job_id, client)\n\n if not job_config:\n job_config = CopyJobConfig()\n\n self._configuration = job_config\n self._properties[\"configuration\"] = job_config._properties\n\n if destination:\n _helpers._set_sub_prop(\n 
self._properties,\n [\"configuration\", \"copy\", \"destinationTable\"],\n destination.to_api_repr(),\n )\n\n if sources:\n source_resources = [source.to_api_repr() for source in sources]\n _helpers._set_sub_prop(\n self._properties,\n [\"configuration\", \"copy\", \"sourceTables\"],\n source_resources,\n )\n\n @property\n def destination(self):\n \"\"\"google.cloud.bigquery.table.TableReference: Table into which data\n is to be loaded.\n \"\"\"\n return TableReference.from_api_repr(\n _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"destinationTable\"]\n )\n )\n\n @property\n def sources(self):\n \"\"\"List[google.cloud.bigquery.table.TableReference]): Table(s) from\n which data is to be loaded.\n \"\"\"\n source_configs = _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"sourceTables\"]\n )\n if source_configs is None:\n single = _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"sourceTable\"]\n )\n if single is None:\n raise KeyError(\"Resource missing 'sourceTables' / 'sourceTable'\")\n source_configs = [single]\n\n sources = []\n for source_config in source_configs:\n table_ref = TableReference.from_api_repr(source_config)\n sources.append(table_ref)\n return sources\n\n @property\n def create_disposition(self):\n \"\"\"See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.\n \"\"\"\n return self._configuration.create_disposition\n\n @property\n def write_disposition(self):\n \"\"\"See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.\n \"\"\"\n return self._configuration.write_disposition\n\n @property\n def destination_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the destination table.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.\n \"\"\"\n return self._configuration.destination_encryption_configuration\n\n def to_api_repr(self):\n \"\"\"Generate a resource for :meth:`_begin`.\"\"\"\n # Exclude statistics, if set.\n return {\n \"jobReference\": self._properties[\"jobReference\"],\n \"configuration\": self._properties[\"configuration\"],\n }\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. 
note::\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n Args:\n resource (Dict): dataset job representation returned from the API\n client (google.cloud.bigquery.client.Client):\n Client which holds credentials and project\n configuration for the dataset.\n\n Returns:\n google.cloud.bigquery.job.CopyJob: Job parsed from ``resource``.\n \"\"\"\n cls._check_resource_config(resource)\n job_ref = _JobReference._from_api_repr(resource[\"jobReference\"])\n job = cls(job_ref, None, None, client=client)\n job._set_properties(resource)\n return job\n", "path": "google/cloud/bigquery/job/copy_.py"}], "after_files": [{"content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for copy jobs.\"\"\"\n\nfrom typing import Optional\n\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery.table import TableReference\n\nfrom google.cloud.bigquery.job.base import _AsyncJob\nfrom google.cloud.bigquery.job.base import _JobConfig\nfrom google.cloud.bigquery.job.base import _JobReference\n\n\nclass OperationType:\n \"\"\"Different operation types supported in table copy job.\n\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#operationtype\n \"\"\"\n\n OPERATION_TYPE_UNSPECIFIED = \"OPERATION_TYPE_UNSPECIFIED\"\n \"\"\"Unspecified operation type.\"\"\"\n\n COPY = \"COPY\"\n \"\"\"The source and destination table have the same table type.\"\"\"\n\n SNAPSHOT = \"SNAPSHOT\"\n \"\"\"The source table type is TABLE and the destination table type is SNAPSHOT.\"\"\"\n\n CLONE = \"CLONE\"\n \"\"\"The source table type is TABLE and the destination table type is CLONE.\"\"\"\n\n RESTORE = \"RESTORE\"\n \"\"\"The source table type is SNAPSHOT and the destination table type is TABLE.\"\"\"\n\n\nclass CopyJobConfig(_JobConfig):\n \"\"\"Configuration options for copy jobs.\n\n All properties in this class are optional. Values which are :data:`None` ->\n server defaults. 
Set properties on the constructed configuration by using\n the property name as the name of a keyword argument.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n super(CopyJobConfig, self).__init__(\"copy\", **kwargs)\n\n @property\n def create_disposition(self):\n \"\"\"google.cloud.bigquery.job.CreateDisposition: Specifies behavior\n for creating tables.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.create_disposition\n \"\"\"\n return self._get_sub_prop(\"createDisposition\")\n\n @create_disposition.setter\n def create_disposition(self, value):\n self._set_sub_prop(\"createDisposition\", value)\n\n @property\n def write_disposition(self):\n \"\"\"google.cloud.bigquery.job.WriteDisposition: Action that occurs if\n the destination table already exists.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.write_disposition\n \"\"\"\n return self._get_sub_prop(\"writeDisposition\")\n\n @write_disposition.setter\n def write_disposition(self, value):\n self._set_sub_prop(\"writeDisposition\", value)\n\n @property\n def destination_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the destination table.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_encryption_configuration\n \"\"\"\n prop = self._get_sub_prop(\"destinationEncryptionConfiguration\")\n if prop is not None:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @destination_encryption_configuration.setter\n def destination_encryption_configuration(self, value):\n api_repr = value\n if value is not None:\n api_repr = value.to_api_repr()\n self._set_sub_prop(\"destinationEncryptionConfiguration\", api_repr)\n\n @property\n def operation_type(self) -> str:\n \"\"\"The operation to perform with this copy job.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.operation_type\n \"\"\"\n return self._get_sub_prop(\n \"operationType\", OperationType.OPERATION_TYPE_UNSPECIFIED\n )\n\n @operation_type.setter\n def operation_type(self, value: Optional[str]):\n if value is None:\n value = OperationType.OPERATION_TYPE_UNSPECIFIED\n self._set_sub_prop(\"operationType\", value)\n\n @property\n def destination_expiration_time(self) -> str:\n \"\"\"google.cloud.bigquery.job.DestinationExpirationTime: The time when the\n destination table expires. 
Expired tables will be deleted and their storage reclaimed.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_expiration_time\n \"\"\"\n return self._get_sub_prop(\"destinationExpirationTime\")\n\n @destination_expiration_time.setter\n def destination_expiration_time(self, value: str):\n self._set_sub_prop(\"destinationExpirationTime\", value)\n\n\nclass CopyJob(_AsyncJob):\n \"\"\"Asynchronous job: copy data into a table from other tables.\n\n Args:\n job_id (str): the job's ID, within the project belonging to ``client``.\n\n sources (List[google.cloud.bigquery.table.TableReference]): Table from which data is to be loaded.\n\n destination (google.cloud.bigquery.table.TableReference): Table into which data is to be loaded.\n\n client (google.cloud.bigquery.client.Client):\n A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):\n Extra configuration options for the copy job.\n \"\"\"\n\n _JOB_TYPE = \"copy\"\n\n def __init__(self, job_id, sources, destination, client, job_config=None):\n super(CopyJob, self).__init__(job_id, client)\n\n if not job_config:\n job_config = CopyJobConfig()\n\n self._configuration = job_config\n self._properties[\"configuration\"] = job_config._properties\n\n if destination:\n _helpers._set_sub_prop(\n self._properties,\n [\"configuration\", \"copy\", \"destinationTable\"],\n destination.to_api_repr(),\n )\n\n if sources:\n source_resources = [source.to_api_repr() for source in sources]\n _helpers._set_sub_prop(\n self._properties,\n [\"configuration\", \"copy\", \"sourceTables\"],\n source_resources,\n )\n\n @property\n def destination(self):\n \"\"\"google.cloud.bigquery.table.TableReference: Table into which data\n is to be loaded.\n \"\"\"\n return TableReference.from_api_repr(\n _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"destinationTable\"]\n )\n )\n\n @property\n def sources(self):\n \"\"\"List[google.cloud.bigquery.table.TableReference]): Table(s) from\n which data is to be loaded.\n \"\"\"\n source_configs = _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"sourceTables\"]\n )\n if source_configs is None:\n single = _helpers._get_sub_prop(\n self._properties, [\"configuration\", \"copy\", \"sourceTable\"]\n )\n if single is None:\n raise KeyError(\"Resource missing 'sourceTables' / 'sourceTable'\")\n source_configs = [single]\n\n sources = []\n for source_config in source_configs:\n table_ref = TableReference.from_api_repr(source_config)\n sources.append(table_ref)\n return sources\n\n @property\n def create_disposition(self):\n \"\"\"See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.\n \"\"\"\n return self._configuration.create_disposition\n\n @property\n def write_disposition(self):\n \"\"\"See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.\n \"\"\"\n return self._configuration.write_disposition\n\n @property\n def destination_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the destination table.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See\n :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.\n \"\"\"\n return 
self._configuration.destination_encryption_configuration\n\n def to_api_repr(self):\n \"\"\"Generate a resource for :meth:`_begin`.\"\"\"\n # Exclude statistics, if set.\n return {\n \"jobReference\": self._properties[\"jobReference\"],\n \"configuration\": self._properties[\"configuration\"],\n }\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. note::\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n Args:\n resource (Dict): dataset job representation returned from the API\n client (google.cloud.bigquery.client.Client):\n Client which holds credentials and project\n configuration for the dataset.\n\n Returns:\n google.cloud.bigquery.job.CopyJob: Job parsed from ``resource``.\n \"\"\"\n cls._check_resource_config(resource)\n job_ref = _JobReference._from_api_repr(resource[\"jobReference\"])\n job = cls(job_ref, None, None, client=client)\n job._set_properties(resource)\n return job\n", "path": "google/cloud/bigquery/job/copy_.py"}]}
| 2,961 | 251 |
gh_patches_debug_2773
|
rasdani/github-patches
|
git_diff
|
Netflix__lemur-3166
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DNS Providers list doesn't show type
In the DNS Providers list, there is a column for the provider type, but it's always empty.
Looking at the code, and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers.
There should be a providerType value in the JSON, but it's not there.
A quick glance at the `DnsProvidersNestedOutputSchema` shows that the value is called `providerType`, but in the database the field is called `provider_type` (similar to `api_endpoint`, which keeps the name `api_endpoint` in the OutputSchema), so it is probably just mislabeled in the OutputSchema and needs to be adjusted there, and maybe in the Angular template as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/dns_providers/schemas.py`
Content:
```
1 from marshmallow import fields
2
3 from lemur.common.fields import ArrowDateTime
4 from lemur.common.schema import LemurInputSchema, LemurOutputSchema
5
6
7 class DnsProvidersNestedOutputSchema(LemurOutputSchema):
8 __envelope__ = False
9 id = fields.Integer()
10 name = fields.String()
11 providerType = fields.String()
12 description = fields.String()
13 credentials = fields.String()
14 api_endpoint = fields.String()
15 date_created = ArrowDateTime()
16
17
18 class DnsProvidersNestedInputSchema(LemurInputSchema):
19 __envelope__ = False
20 name = fields.String()
21 description = fields.String()
22 provider_type = fields.Dict()
23
24
25 dns_provider_output_schema = DnsProvidersNestedOutputSchema()
26
27 dns_provider_input_schema = DnsProvidersNestedInputSchema()
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py
--- a/lemur/dns_providers/schemas.py
+++ b/lemur/dns_providers/schemas.py
@@ -8,7 +8,7 @@
__envelope__ = False
id = fields.Integer()
name = fields.String()
- providerType = fields.String()
+ provider_type = fields.String()
description = fields.String()
credentials = fields.String()
api_endpoint = fields.String()
|
{"golden_diff": "diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py\n--- a/lemur/dns_providers/schemas.py\n+++ b/lemur/dns_providers/schemas.py\n@@ -8,7 +8,7 @@\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n- providerType = fields.String()\n+ provider_type = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n", "issue": "DNS Providers list doesn't show type\nIn the DNS Providers list, there is a column for the provider type, but it's always empty.\r\n\r\nLooking at the code, and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers.\r\n\r\nThere should be a providerType value in the JSON, but it's not there. \r\n\r\nA quick glance at the `DnsProvidersNestedOutputSchema` shows that the value is called `providerType`, but in the database the field is called `provider_type` similar to `api_endpoint` which is called `api_endpoint` in the OutputSchema, so I guess, it's probably just mislabeled in the OutputSchema, and needs to be adjusted there, and maybe in the angular template.\n", "before_files": [{"content": "from marshmallow import fields\n\nfrom lemur.common.fields import ArrowDateTime\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\n\n\nclass DnsProvidersNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n providerType = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n date_created = ArrowDateTime()\n\n\nclass DnsProvidersNestedInputSchema(LemurInputSchema):\n __envelope__ = False\n name = fields.String()\n description = fields.String()\n provider_type = fields.Dict()\n\n\ndns_provider_output_schema = DnsProvidersNestedOutputSchema()\n\ndns_provider_input_schema = DnsProvidersNestedInputSchema()\n", "path": "lemur/dns_providers/schemas.py"}], "after_files": [{"content": "from marshmallow import fields\n\nfrom lemur.common.fields import ArrowDateTime\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\n\n\nclass DnsProvidersNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n provider_type = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n date_created = ArrowDateTime()\n\n\nclass DnsProvidersNestedInputSchema(LemurInputSchema):\n __envelope__ = False\n name = fields.String()\n description = fields.String()\n provider_type = fields.Dict()\n\n\ndns_provider_output_schema = DnsProvidersNestedOutputSchema()\n\ndns_provider_input_schema = DnsProvidersNestedInputSchema()\n", "path": "lemur/dns_providers/schemas.py"}]}
| 641 | 117 |
gh_patches_debug_26623
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django 2.1 upgrade
https://docs.djangoproject.com/en/2.1/releases/2.1/
There is a guide for upgrading: https://docs.djangoproject.com/en/2.1/howto/upgrade-version/
Basically
* Read the release notes
* update dependencies
* run tests with `python -Wa` and solve deprecation warnings
* put the new django into the requirements
* run tests, fix failures if any
* run tests with `python -Wa` and solve deprecation warnings again
* if there was any new feature in the release notes that might help us, use it
Also, we need to check the installed Python version on production; Django 2.1 supports Python 3.5 and newer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py`
Content:
```
1 # Generated by Django 1.11.3 on 2017-07-03 18:31
2
3 from django.db import migrations, models
4 import uuid
5
6
7 def fill_textanswer_uuid(apps, schema_editor):
8 db_alias = schema_editor.connection.alias
9 TextAnswer = apps.get_model('evaluation', 'TextAnswer')
10 for obj in TextAnswer.objects.using(db_alias).all():
11 obj.uuid = uuid.uuid4()
12 obj.save()
13
14
15 class Migration(migrations.Migration):
16
17 dependencies = [
18 ('evaluation', '0061_editor_review_reminder_template'),
19 ]
20
21 # Based on
22 # https://gist.github.com/smcoll/8bb867dc631433c01fd0
23
24 operations = [
25 migrations.AddField(
26 model_name='textanswer',
27 name='uuid',
28 field=models.UUIDField(null=True),
29 ),
30 migrations.RunPython(fill_textanswer_uuid, migrations.RunPython.noop),
31 migrations.AlterField(
32 model_name='textanswer',
33 name='uuid',
34 field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),
35 ),
36 # rename the old id field before deleting it at the end of the
37 # migration for compatibility with the sqlite driver
38 migrations.RenameField(
39 model_name='textanswer',
40 old_name='id',
41 new_name='old_id'
42 ),
43 migrations.RenameField(
44 model_name='textanswer',
45 old_name='uuid',
46 new_name='id'
47 ),
48 migrations.AlterField(
49 model_name='textanswer',
50 name='id',
51 field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False),
52 ),
53 migrations.AlterModelOptions(
54 name='textanswer',
55 options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},
56 ),
57 migrations.RemoveField(model_name='textanswer', name='old_id'),
58 ]
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py
--- a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py
+++ b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py
@@ -33,13 +33,12 @@
name='uuid',
field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),
),
- # rename the old id field before deleting it at the end of the
- # migration for compatibility with the sqlite driver
- migrations.RenameField(
- model_name='textanswer',
- old_name='id',
- new_name='old_id'
- ),
+ # this causes trouble with sqlite. We have two open bug reports with django for this, see
+ # https://code.djangoproject.com/ticket/29790 and https://code.djangoproject.com/ticket/28541
+ # We can not get this to work with sqlite and postgres right now and we want django2.1, we only
+ # support postgres here. For sqlite, you need to rename the field here and move the RemoveField to
+ # the end.
+ migrations.RemoveField(model_name='textanswer', name='id'),
migrations.RenameField(
model_name='textanswer',
old_name='uuid',
@@ -54,5 +53,4 @@
name='textanswer',
options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},
),
- migrations.RemoveField(model_name='textanswer', name='old_id'),
]
|
{"golden_diff": "diff --git a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n--- a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n+++ b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n@@ -33,13 +33,12 @@\n name='uuid',\n field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),\n ),\n- # rename the old id field before deleting it at the end of the\n- # migration for compatibility with the sqlite driver\n- migrations.RenameField(\n- model_name='textanswer',\n- old_name='id',\n- new_name='old_id'\n- ),\n+ # this causes trouble with sqlite. We have two open bug reports with django for this, see\n+ # https://code.djangoproject.com/ticket/29790 and https://code.djangoproject.com/ticket/28541\n+ # We can not get this to work with sqlite and postgres right now and we want django2.1, we only\n+ # support postgres here. For sqlite, you need to rename the field here and move the RemoveField to\n+ # the end.\n+ migrations.RemoveField(model_name='textanswer', name='id'),\n migrations.RenameField(\n model_name='textanswer',\n old_name='uuid',\n@@ -54,5 +53,4 @@\n name='textanswer',\n options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},\n ),\n- migrations.RemoveField(model_name='textanswer', name='old_id'),\n ]\n", "issue": "Django 2.1 upgrade\nhttps://docs.djangoproject.com/en/2.1/releases/2.1/\r\n\r\nThere is a guide for upgrading: https://docs.djangoproject.com/en/2.1/howto/upgrade-version/\r\n\r\nBasically\r\n* Read the release notes\r\n* update dependencies\r\n* run tests with `python -Wa` and solve deprecation warnings\r\n* put the new django into the requirements\r\n* run tests, fix failures if any\r\n* run tests with `python -Wa` and solve deprecation warnings again\r\n* if there was any new feature in the release notes that might help us, use it\r\n\r\nalso, we need to check the installed python version on production, django 2.1 supports python 3.5 and newer.\n", "before_files": [{"content": "# Generated by Django 1.11.3 on 2017-07-03 18:31\n\nfrom django.db import migrations, models\nimport uuid\n\n\ndef fill_textanswer_uuid(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n TextAnswer = apps.get_model('evaluation', 'TextAnswer')\n for obj in TextAnswer.objects.using(db_alias).all():\n obj.uuid = uuid.uuid4()\n obj.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('evaluation', '0061_editor_review_reminder_template'),\n ]\n\n # Based on\n # https://gist.github.com/smcoll/8bb867dc631433c01fd0\n\n operations = [\n migrations.AddField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(null=True),\n ),\n migrations.RunPython(fill_textanswer_uuid, migrations.RunPython.noop),\n migrations.AlterField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),\n ),\n # rename the old id field before deleting it at the end of the\n # migration for compatibility with the sqlite driver\n migrations.RenameField(\n model_name='textanswer',\n old_name='id',\n new_name='old_id'\n ),\n migrations.RenameField(\n model_name='textanswer',\n old_name='uuid',\n new_name='id'\n ),\n migrations.AlterField(\n model_name='textanswer',\n name='id',\n field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False),\n ),\n 
migrations.AlterModelOptions(\n name='textanswer',\n options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},\n ),\n migrations.RemoveField(model_name='textanswer', name='old_id'),\n ]\n", "path": "evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py"}], "after_files": [{"content": "# Generated by Django 1.11.3 on 2017-07-03 18:31\n\nfrom django.db import migrations, models\nimport uuid\n\n\ndef fill_textanswer_uuid(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n TextAnswer = apps.get_model('evaluation', 'TextAnswer')\n for obj in TextAnswer.objects.using(db_alias).all():\n obj.uuid = uuid.uuid4()\n obj.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('evaluation', '0061_editor_review_reminder_template'),\n ]\n\n # Based on\n # https://gist.github.com/smcoll/8bb867dc631433c01fd0\n\n operations = [\n migrations.AddField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(null=True),\n ),\n migrations.RunPython(fill_textanswer_uuid, migrations.RunPython.noop),\n migrations.AlterField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),\n ),\n # this causes trouble with sqlite. We have two open bug reports with django for this, see\n # https://code.djangoproject.com/ticket/29790 and https://code.djangoproject.com/ticket/28541\n # We can not get this to work with sqlite and postgres right now and we want django2.1, we only\n # support postgres here. For sqlite, you need to rename the field here and move the RemoveField to\n # the end.\n migrations.RemoveField(model_name='textanswer', name='id'),\n migrations.RenameField(\n model_name='textanswer',\n old_name='uuid',\n new_name='id'\n ),\n migrations.AlterField(\n model_name='textanswer',\n name='id',\n field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False),\n ),\n migrations.AlterModelOptions(\n name='textanswer',\n options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},\n ),\n ]\n", "path": "evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py"}]}
| 968 | 398 |
gh_patches_debug_2234
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-2674
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Canceling async Redis command leaves connection open, in unsafe state for future commands
**Version**: 4.5.3
**Platform**: Python 3.8 on Ubuntu / Generic
**Description**: Canceling async Redis command leaves connection open, in unsafe state for future commands
This is a reincarnation of #2624, which was closed with an incomplete fix and a possibly unreliable test case. This is the same issue that recently got a lot of attention due to the ChatGPT outage, and it remains only partially fixed. The cancellation shielding introduced in #2641 addressed only the cancellation of Redis pipeline operations, but non-pipelined ops are still vulnerable.
This time I am attaching a script that reproduces the issue reliably without relying on an external, slow Redis server. This is achieved by inserting a small TCP socket proxy between the Redis client and local Redis server, with the proxy introducing a 0.1 second delay when sending data in either direction.
Running this script with a Redis server running locally on port 6379 produces the following output:
```
$ python redis_cancel.py
managed to cancel the task, connection is left open with unread response
bar: b'foo'
ping: False
foo: b'PONG'
```
```python
import asyncio
from redis.asyncio import Redis
async def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, delay: float, name=''):
while data := await reader.read(1000):
# print(name, 'received:', data)
await asyncio.sleep(delay)
writer.write(data)
await writer.drain()
class DelayProxy:
def __init__(self, addr, redis_addr, delay: float):
self.addr = addr
self.redis_addr = redis_addr
self.delay = delay
async def start(self):
server = await asyncio.start_server(self.handle, *self.addr)
asyncio.create_task(server.serve_forever())
async def handle(self, reader, writer):
# establish connection to redis
redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr)
pipe1 = asyncio.create_task(pipe(reader, redis_writer, self.delay, 'to redis:'))
pipe2 = asyncio.create_task(pipe(redis_reader, writer, self.delay, 'from redis:'))
await asyncio.gather(pipe1, pipe2)
async def main():
# create a tcp socket proxy that relays data to Redis and back, inserting 0.1 seconds of delay
dp = DelayProxy(addr=('localhost', 6380), redis_addr=('localhost', 6379), delay=0.1)
await dp.start()
# note that we connect to proxy, rather than to Redis directly
async with Redis(host='localhost', port=6380) as r:
await r.set('foo', 'foo')
await r.set('bar', 'bar')
t = asyncio.create_task(r.get('foo'))
await asyncio.sleep(0.050)
t.cancel()
try:
await t
print('try again, we did not cancel the task in time')
except asyncio.CancelledError:
print('managed to cancel the task, connection is left open with unread response')
print('bar:', await r.get('bar'))
print('ping:', await r.ping())
print('foo:', await r.get('foo'))
if __name__ == '__main__':
asyncio.run(main())
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import find_packages, setup
3
4 setup(
5 name="redis",
6 description="Python client for Redis database and key-value store",
7 long_description=open("README.md").read().strip(),
8 long_description_content_type="text/markdown",
9 keywords=["Redis", "key-value store", "database"],
10 license="MIT",
11 version="4.5.3",
12 packages=find_packages(
13 include=[
14 "redis",
15 "redis.asyncio",
16 "redis.commands",
17 "redis.commands.bf",
18 "redis.commands.json",
19 "redis.commands.search",
20 "redis.commands.timeseries",
21 "redis.commands.graph",
22 ]
23 ),
24 url="https://github.com/redis/redis-py",
25 project_urls={
26 "Documentation": "https://redis.readthedocs.io/en/latest/",
27 "Changes": "https://github.com/redis/redis-py/releases",
28 "Code": "https://github.com/redis/redis-py",
29 "Issue tracker": "https://github.com/redis/redis-py/issues",
30 },
31 author="Redis Inc.",
32 author_email="[email protected]",
33 python_requires=">=3.7",
34 install_requires=[
35 'importlib-metadata >= 1.0; python_version < "3.8"',
36 'typing-extensions; python_version<"3.8"',
37 'async-timeout>=4.0.2; python_version<="3.11.2"',
38 ],
39 classifiers=[
40 "Development Status :: 5 - Production/Stable",
41 "Environment :: Console",
42 "Intended Audience :: Developers",
43 "License :: OSI Approved :: MIT License",
44 "Operating System :: OS Independent",
45 "Programming Language :: Python",
46 "Programming Language :: Python :: 3",
47 "Programming Language :: Python :: 3 :: Only",
48 "Programming Language :: Python :: 3.7",
49 "Programming Language :: Python :: 3.8",
50 "Programming Language :: Python :: 3.9",
51 "Programming Language :: Python :: 3.10",
52 "Programming Language :: Python :: 3.11",
53 "Programming Language :: Python :: Implementation :: CPython",
54 "Programming Language :: Python :: Implementation :: PyPy",
55 ],
56 extras_require={
57 "hiredis": ["hiredis>=1.0.0"],
58 "ocsp": ["cryptography>=36.0.1", "pyopenssl==20.0.1", "requests>=2.26.0"],
59 },
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
long_description_content_type="text/markdown",
keywords=["Redis", "key-value store", "database"],
license="MIT",
- version="4.5.3",
+ version="4.5.4",
packages=find_packages(
include=[
"redis",
|
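Editor's note (not part of the dataset row): the golden diff above only bumps the released version to 4.5.4; the cancellation-safety work itself presumably lives in the client/connection code rather than in `setup.py`. As a hedged, caller-side sketch (not the upstream fix — it assumes the public `redis.asyncio` API, in which `Redis.connection_pool.disconnect()` is awaitable), a single command can be wrapped so that a cancellation never leaves a half-read connection in the pool:

```python
import asyncio

from redis.asyncio import Redis


async def get_safely(client: Redis, key: str):
    """GET a key, but never leave a pending reply on a pooled connection."""
    try:
        return await client.get(key)
    except asyncio.CancelledError:
        # The reply to the cancelled GET may still be in flight; dropping the
        # pool's connections prevents the off-by-one replies shown above.
        await client.connection_pool.disconnect()
        raise
```

Pipelines were already shielded by #2641, so a guard like this only matters for single, non-pipelined commands on affected versions.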
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n- version=\"4.5.3\",\n+ version=\"4.5.4\",\n packages=find_packages(\n include=[\n \"redis\",\n", "issue": "Canceling async Redis command leaves connection open, in unsafe state for future commands\n\r\n**Version**: 4.5.3\r\n\r\n**Platform**: Python 3.8 on Ubuntu / Generic\r\n\r\n**Description**: Canceling async Redis command leaves connection open, in unsafe state for future commands\r\n\r\nThis is a reincarnation of #2624, which was closed with an incomplete fix and a possibly unreliable test case. This is the same issue that recently got a lot of attention due to ChatGPT outage, and that remains only partially fixed. The cancellation shielding introduced in #2641 addressed only the cancellation of Redis pipeline operation, but non-pipelined ops are still vulnerable.\r\n\r\nThis time I am attaching a script that reproduces the issue reliably without relying on an external, slow Redis server. This is achieved by inserting a small TCP socket proxy between the Redis client and local Redis server, with the proxy introducing a 0.1 second delay when sending data in either direction. \r\n\r\nRunning this script with a Redis server running locally on port 6379 produces the following output:\r\n```\r\n$ python redis_cancel.py \r\nmanaged to cancel the task, connection is left open with unread response\r\nbar: b'foo'\r\nping: False\r\nfoo: b'PONG'\r\n```\r\n\r\n```python\r\nimport asyncio\r\n\r\nfrom redis.asyncio import Redis\r\n\r\n\r\nasync def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, delay: float, name=''):\r\n while data := await reader.read(1000):\r\n # print(name, 'received:', data)\r\n await asyncio.sleep(delay)\r\n writer.write(data)\r\n await writer.drain()\r\n\r\n\r\nclass DelayProxy:\r\n\r\n def __init__(self, addr, redis_addr, delay: float):\r\n self.addr = addr\r\n self.redis_addr = redis_addr\r\n self.delay = delay\r\n\r\n async def start(self):\r\n server = await asyncio.start_server(self.handle, *self.addr)\r\n asyncio.create_task(server.serve_forever())\r\n\r\n async def handle(self, reader, writer):\r\n # establish connection to redis\r\n redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr)\r\n pipe1 = asyncio.create_task(pipe(reader, redis_writer, self.delay, 'to redis:'))\r\n pipe2 = asyncio.create_task(pipe(redis_reader, writer, self.delay, 'from redis:'))\r\n await asyncio.gather(pipe1, pipe2)\r\n\r\n\r\nasync def main():\r\n\r\n # create a tcp socket proxy that relays data to Redis and back, inserting 0.1 seconds of delay\r\n dp = DelayProxy(addr=('localhost', 6380), redis_addr=('localhost', 6379), delay=0.1)\r\n await dp.start()\r\n\r\n # note that we connect to proxy, rather than to Redis directly\r\n async with Redis(host='localhost', port=6380) as r:\r\n\r\n await r.set('foo', 'foo')\r\n await r.set('bar', 'bar')\r\n\r\n t = asyncio.create_task(r.get('foo'))\r\n await asyncio.sleep(0.050)\r\n t.cancel()\r\n try:\r\n await t\r\n print('try again, we did not cancel the task in time')\r\n except asyncio.CancelledError:\r\n print('managed to cancel the task, connection is left open with unread response')\r\n\r\n print('bar:', await r.get('bar'))\r\n print('ping:', await r.ping())\r\n print('foo:', await r.get('foo'))\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main())\r\n```\r\n\n", 
"before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=\"4.5.3\",\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.asyncio\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n project_urls={\n \"Documentation\": \"https://redis.readthedocs.io/en/latest/\",\n \"Changes\": \"https://github.com/redis/redis-py/releases\",\n \"Code\": \"https://github.com/redis/redis-py\",\n \"Issue tracker\": \"https://github.com/redis/redis-py/issues\",\n },\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n install_requires=[\n 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n 'typing-extensions; python_version<\"3.8\"',\n 'async-timeout>=4.0.2; python_version<=\"3.11.2\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n \"ocsp\": [\"cryptography>=36.0.1\", \"pyopenssl==20.0.1\", \"requests>=2.26.0\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=\"4.5.4\",\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.asyncio\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n project_urls={\n \"Documentation\": \"https://redis.readthedocs.io/en/latest/\",\n \"Changes\": \"https://github.com/redis/redis-py/releases\",\n \"Code\": \"https://github.com/redis/redis-py\",\n \"Issue tracker\": \"https://github.com/redis/redis-py/issues\",\n },\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n install_requires=[\n 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n 'typing-extensions; python_version<\"3.8\"',\n 'async-timeout>=4.0.2; python_version<=\"3.11.2\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n 
\"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n \"ocsp\": [\"cryptography>=36.0.1\", \"pyopenssl==20.0.1\", \"requests>=2.26.0\"],\n },\n)\n", "path": "setup.py"}]}
| 1,644 | 90 |
gh_patches_debug_375
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-2561
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't use any installers
When I try to install the Epic Games Store, for example from https://lutris.net/games/epic-games-store/,
the installer eventually fails with this error:
```
2019-12-29 10:35:48,329: Error while completing task <bound method CommandsMixin.execute of <lutris.installer.interpreter.ScriptInterpreter object at 0x7f3f726e59a0>>: sequence item 1: expected str instance, list found
<class 'TypeError'> sequence item 1: expected str instance, list found
File "/usr/lib/python3.8/site-packages/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/lutris/installer/commands.py", line 152, in execute
command.start()
File "/usr/lib/python3.8/site-packages/lutris/command.py", line 116, in start
logger.debug("Running %s", " ".join(self.wrapper_command))
```
It seems to affect all installers though, not just this one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/command.py`
Content:
```
1 """Threading module, used to launch games while monitoring them."""
2
3 import io
4 import os
5 import sys
6 import fcntl
7 import shlex
8 import subprocess
9 import contextlib
10 from textwrap import dedent
11
12 from gi.repository import GLib
13
14 from lutris import settings
15 from lutris import runtime
16 from lutris.util.log import logger
17 from lutris.util import system
18
19 WRAPPER_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "lutris-wrapper")
20
21
22 class MonitoredCommand:
23 """Exexcutes a commmand while keeping track of its state"""
24
25 fallback_cwd = "/tmp"
26
27 def __init__(
28 self,
29 command,
30 runner=None,
31 env=None,
32 term=None,
33 cwd=None,
34 include_processes=None,
35 exclude_processes=None,
36 log_buffer=None,
37 title=None,
38 ): # pylint: disable=too-many-arguments
39 self.ready_state = True
40 self.env = self.get_environment(env)
41
42 self.command = command
43 self.runner = runner
44 self.stop_func = lambda: True
45 self.game_process = None
46 self.prevent_on_stop = False
47 self.return_code = None
48 self.terminal = system.find_executable(term)
49 self.is_running = True
50 self.error = None
51 self.log_handlers = [
52 self.log_handler_stdout,
53 self.log_handler_console_output,
54 ]
55 self.set_log_buffer(log_buffer)
56 self.stdout_monitor = None
57 self.include_processes = include_processes or []
58 self.exclude_processes = exclude_processes or []
59
60 self.cwd = self.get_cwd(cwd)
61
62 self._stdout = io.StringIO()
63
64 self._title = title if title else command
65
66 @property
67 def stdout(self):
68 return self._stdout.getvalue()
69
70 @property
71 def wrapper_command(self):
72 """Return launch arguments for the wrapper script"""
73
74 return [
75 WRAPPER_SCRIPT,
76 self._title,
77 str(len(self.include_processes)),
78 str(len(self.exclude_processes)),
79 ] + self.include_processes + self.exclude_processes + self.command
80
81 def set_log_buffer(self, log_buffer):
82 """Attach a TextBuffer to this command enables the buffer handler"""
83 if not log_buffer:
84 return
85 self.log_buffer = log_buffer
86 if self.log_handler_buffer not in self.log_handlers:
87 self.log_handlers.append(self.log_handler_buffer)
88
89 def get_cwd(self, cwd):
90 """Return the current working dir of the game"""
91 if not cwd:
92 cwd = self.runner.working_dir if self.runner else None
93 return os.path.expanduser(cwd or "~")
94
95 @staticmethod
96 def get_environment(user_env):
97 """Process the user provided environment variables for use as self.env"""
98 env = user_env or {}
99 # not clear why this needs to be added, the path is already added in
100 # the wrappper script.
101 env['PYTHONPATH'] = ':'.join(sys.path)
102 # Drop bad values of environment keys, those will confuse the Python
103 # interpreter.
104 return {
105 key: value for key, value in env.items() if "=" not in key
106 }
107
108 def get_child_environment(self):
109 """Returns the calculated environment for the child process."""
110 env = os.environ.copy()
111 env.update(self.env)
112 return env
113
114 def start(self):
115 """Run the thread."""
116 logger.debug("Running %s", " ".join(self.wrapper_command))
117 for key, value in self.env.items():
118 logger.debug("ENV: %s=\"%s\"", key, value)
119
120 if self.terminal:
121 self.game_process = self.run_in_terminal()
122 else:
123 env = self.get_child_environment()
124 self.game_process = self.execute_process(self.wrapper_command, env)
125
126 if not self.game_process:
127 logger.warning("No game process available")
128 return
129
130 GLib.child_watch_add(self.game_process.pid, self.on_stop)
131
132 # make stdout nonblocking.
133 fileno = self.game_process.stdout.fileno()
134 fcntl.fcntl(
135 fileno,
136 fcntl.F_SETFL,
137 fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK
138 )
139
140 self.stdout_monitor = GLib.io_add_watch(
141 self.game_process.stdout,
142 GLib.IO_IN | GLib.IO_HUP,
143 self.on_stdout_output,
144 )
145
146 def log_handler_stdout(self, line):
147 """Add the line to this command's stdout attribute"""
148 self._stdout.write(line)
149
150 def log_handler_buffer(self, line):
151 """Add the line to the associated LogBuffer object"""
152 self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)
153
154 def log_handler_console_output(self, line): # pylint: disable=no-self-use
155 """Print the line to stdout"""
156 with contextlib.suppress(BlockingIOError):
157 sys.stdout.write(line)
158 sys.stdout.flush()
159
160 def on_stop(self, _pid, returncode):
161 """Callback registered on game process termination"""
162 if self.prevent_on_stop: # stop() already in progress
163 return False
164
165 logger.debug("The process has terminated with code %s", returncode)
166 self.is_running = False
167 self.return_code = returncode
168
169 resume_stop = self.stop()
170 if not resume_stop:
171 logger.info("Full shutdown prevented")
172 return False
173
174 return False
175
176 def on_stdout_output(self, stdout, condition):
177 """Called by the stdout monitor to dispatch output to log handlers"""
178 if condition == GLib.IO_HUP:
179 self.stdout_monitor = None
180 return False
181 if not self.is_running:
182 return False
183 try:
184 line = stdout.read(262144).decode("utf-8", errors="ignore")
185 except ValueError:
186 # file_desc might be closed
187 return True
188 if "winemenubuilder.exe" in line:
189 return True
190 for log_handler in self.log_handlers:
191 log_handler(line)
192 return True
193
194 def run_in_terminal(self):
195 """Write command in a script file and run it.
196
197 Running it from a file is likely the only way to set env vars only
198 for the command (not for the terminal app).
199 It's also the only reliable way to keep the term open when the
200 game is quit.
201 """
202 script_path = os.path.join(settings.CACHE_DIR, "run_in_term.sh")
203 exported_environment = "\n".join(
204 'export %s="%s" ' % (key, value)
205 for key, value in self.env.items()
206 )
207 command = " ".join(['"%s"' % token for token in self.wrapper_command])
208 with open(script_path, "w") as script_file:
209 script_file.write(dedent(
210 """#!/bin/sh
211 cd "%s"
212 %s
213 exec %s
214 """ % (self.cwd, exported_environment, command)
215 ))
216 os.chmod(script_path, 0o744)
217 return self.execute_process([self.terminal, "-e", script_path])
218
219 def execute_process(self, command, env=None):
220 """Execute and return a subprocess"""
221 if self.cwd and not system.path_exists(self.cwd):
222 try:
223 os.makedirs(self.cwd)
224 except OSError:
225 logger.error("Failed to create working directory, falling back to %s",
226 self.fallback_cwd)
227 self.cwd = "/tmp"
228 try:
229
230 return subprocess.Popen(
231 command,
232 stdout=subprocess.PIPE,
233 stderr=subprocess.STDOUT,
234 cwd=self.cwd,
235 env=env,
236 )
237 except OSError as ex:
238 logger.exception("Failed to execute %s: %s", " ".join(command), ex)
239 self.error = ex.strerror
240
241 def stop(self):
242 """Stops the current game process and cleans up the instance"""
243 # Prevent stop() being called again by the process exiting
244 self.prevent_on_stop = True
245
246 try:
247 self.game_process.terminate()
248 except ProcessLookupError: # process already dead.
249 logger.debug("Management process looks dead already.")
250
251 if hasattr(self, "stop_func"):
252 resume_stop = self.stop_func()
253 if not resume_stop:
254 return False
255
256 if self.stdout_monitor:
257 logger.debug("Detaching logger")
258 GLib.source_remove(self.stdout_monitor)
259 self.stdout_monitor = None
260 else:
261 logger.debug("logger already detached")
262
263 self.is_running = False
264 self.ready_state = False
265 return True
266
267
268 def exec_command(command):
269 """Execute arbitrary command in a MonitoredCommand
270
271 Used by the --exec command line flag.
272 """
273 command = MonitoredCommand(shlex.split(command), env=runtime.get_env())
274 command.start()
275 return command
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/command.py b/lutris/command.py
--- a/lutris/command.py
+++ b/lutris/command.py
@@ -61,7 +61,7 @@
self._stdout = io.StringIO()
- self._title = title if title else command
+ self._title = title if title else command[0]
@property
def stdout(self):
|
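Editor's note (not part of the dataset row): with no `title` argument, `MonitoredCommand.__init__` stored the whole `command` list in `self._title`, so `wrapper_command` contained a nested list and `" ".join(...)` raised exactly the `TypeError` quoted in the traceback; the one-token fix substitutes the executable name, `command[0]`. A minimal, self-contained reproduction (the command values are illustrative):

```python
# str.join() requires every element to be a str; a nested list triggers
# "sequence item 1: expected str instance, list found".
wrapper_command = ["lutris-wrapper", ["wine", "start.exe"], "0", "0"]
try:
    print(" ".join(wrapper_command))
except TypeError as exc:
    print(exc)  # sequence item 1: expected str instance, list found

# With the patched behaviour the title slot holds just the executable name:
wrapper_command[1] = "wine"
print(" ".join(wrapper_command))  # lutris-wrapper wine 0 0
```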
{"golden_diff": "diff --git a/lutris/command.py b/lutris/command.py\n--- a/lutris/command.py\n+++ b/lutris/command.py\n@@ -61,7 +61,7 @@\n \n self._stdout = io.StringIO()\n \n- self._title = title if title else command\n+ self._title = title if title else command[0]\n \n @property\n def stdout(self):\n", "issue": "Can't use any installers\nWhen I try to install Epic Games Store, for example https://lutris.net/games/epic-games-store/\r\nthe installer eventually fails on this:\r\n```\r\n2019-12-29 10:35:48,329: Error while completing task <bound method CommandsMixin.execute of <lutris.installer.interpreter.ScriptInterpreter object at 0x7f3f726e59a0>>: sequence item 1: expected str instance, list found\r\n<class 'TypeError'> sequence item 1: expected str instance, list found\r\n File \"/usr/lib/python3.8/site-packages/lutris/util/jobs.py\", line 30, in target\r\n result = self.function(*args, **kwargs)\r\n File \"/usr/lib/python3.8/site-packages/lutris/installer/commands.py\", line 152, in execute\r\n command.start()\r\n File \"/usr/lib/python3.8/site-packages/lutris/command.py\", line 116, in start\r\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\r\n```\r\nIt seems to affect all installers though, not just this one.\r\n\n", "before_files": [{"content": "\"\"\"Threading module, used to launch games while monitoring them.\"\"\"\n\nimport io\nimport os\nimport sys\nimport fcntl\nimport shlex\nimport subprocess\nimport contextlib\nfrom textwrap import dedent\n\nfrom gi.repository import GLib\n\nfrom lutris import settings\nfrom lutris import runtime\nfrom lutris.util.log import logger\nfrom lutris.util import system\n\nWRAPPER_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"lutris-wrapper\")\n\n\nclass MonitoredCommand:\n \"\"\"Exexcutes a commmand while keeping track of its state\"\"\"\n\n fallback_cwd = \"/tmp\"\n\n def __init__(\n self,\n command,\n runner=None,\n env=None,\n term=None,\n cwd=None,\n include_processes=None,\n exclude_processes=None,\n log_buffer=None,\n title=None,\n ): # pylint: disable=too-many-arguments\n self.ready_state = True\n self.env = self.get_environment(env)\n\n self.command = command\n self.runner = runner\n self.stop_func = lambda: True\n self.game_process = None\n self.prevent_on_stop = False\n self.return_code = None\n self.terminal = system.find_executable(term)\n self.is_running = True\n self.error = None\n self.log_handlers = [\n self.log_handler_stdout,\n self.log_handler_console_output,\n ]\n self.set_log_buffer(log_buffer)\n self.stdout_monitor = None\n self.include_processes = include_processes or []\n self.exclude_processes = exclude_processes or []\n\n self.cwd = self.get_cwd(cwd)\n\n self._stdout = io.StringIO()\n\n self._title = title if title else command\n\n @property\n def stdout(self):\n return self._stdout.getvalue()\n\n @property\n def wrapper_command(self):\n \"\"\"Return launch arguments for the wrapper script\"\"\"\n\n return [\n WRAPPER_SCRIPT,\n self._title,\n str(len(self.include_processes)),\n str(len(self.exclude_processes)),\n ] + self.include_processes + self.exclude_processes + self.command\n\n def set_log_buffer(self, log_buffer):\n \"\"\"Attach a TextBuffer to this command enables the buffer handler\"\"\"\n if not log_buffer:\n return\n self.log_buffer = log_buffer\n if self.log_handler_buffer not in self.log_handlers:\n self.log_handlers.append(self.log_handler_buffer)\n\n def get_cwd(self, cwd):\n \"\"\"Return the current working dir of the game\"\"\"\n if not cwd:\n cwd = 
self.runner.working_dir if self.runner else None\n return os.path.expanduser(cwd or \"~\")\n\n @staticmethod\n def get_environment(user_env):\n \"\"\"Process the user provided environment variables for use as self.env\"\"\"\n env = user_env or {}\n # not clear why this needs to be added, the path is already added in\n # the wrappper script.\n env['PYTHONPATH'] = ':'.join(sys.path)\n # Drop bad values of environment keys, those will confuse the Python\n # interpreter.\n return {\n key: value for key, value in env.items() if \"=\" not in key\n }\n\n def get_child_environment(self):\n \"\"\"Returns the calculated environment for the child process.\"\"\"\n env = os.environ.copy()\n env.update(self.env)\n return env\n\n def start(self):\n \"\"\"Run the thread.\"\"\"\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\n for key, value in self.env.items():\n logger.debug(\"ENV: %s=\\\"%s\\\"\", key, value)\n\n if self.terminal:\n self.game_process = self.run_in_terminal()\n else:\n env = self.get_child_environment()\n self.game_process = self.execute_process(self.wrapper_command, env)\n\n if not self.game_process:\n logger.warning(\"No game process available\")\n return\n\n GLib.child_watch_add(self.game_process.pid, self.on_stop)\n\n # make stdout nonblocking.\n fileno = self.game_process.stdout.fileno()\n fcntl.fcntl(\n fileno,\n fcntl.F_SETFL,\n fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK\n )\n\n self.stdout_monitor = GLib.io_add_watch(\n self.game_process.stdout,\n GLib.IO_IN | GLib.IO_HUP,\n self.on_stdout_output,\n )\n\n def log_handler_stdout(self, line):\n \"\"\"Add the line to this command's stdout attribute\"\"\"\n self._stdout.write(line)\n\n def log_handler_buffer(self, line):\n \"\"\"Add the line to the associated LogBuffer object\"\"\"\n self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)\n\n def log_handler_console_output(self, line): # pylint: disable=no-self-use\n \"\"\"Print the line to stdout\"\"\"\n with contextlib.suppress(BlockingIOError):\n sys.stdout.write(line)\n sys.stdout.flush()\n\n def on_stop(self, _pid, returncode):\n \"\"\"Callback registered on game process termination\"\"\"\n if self.prevent_on_stop: # stop() already in progress\n return False\n\n logger.debug(\"The process has terminated with code %s\", returncode)\n self.is_running = False\n self.return_code = returncode\n\n resume_stop = self.stop()\n if not resume_stop:\n logger.info(\"Full shutdown prevented\")\n return False\n\n return False\n\n def on_stdout_output(self, stdout, condition):\n \"\"\"Called by the stdout monitor to dispatch output to log handlers\"\"\"\n if condition == GLib.IO_HUP:\n self.stdout_monitor = None\n return False\n if not self.is_running:\n return False\n try:\n line = stdout.read(262144).decode(\"utf-8\", errors=\"ignore\")\n except ValueError:\n # file_desc might be closed\n return True\n if \"winemenubuilder.exe\" in line:\n return True\n for log_handler in self.log_handlers:\n log_handler(line)\n return True\n\n def run_in_terminal(self):\n \"\"\"Write command in a script file and run it.\n\n Running it from a file is likely the only way to set env vars only\n for the command (not for the terminal app).\n It's also the only reliable way to keep the term open when the\n game is quit.\n \"\"\"\n script_path = os.path.join(settings.CACHE_DIR, \"run_in_term.sh\")\n exported_environment = \"\\n\".join(\n 'export %s=\"%s\" ' % (key, value)\n for key, value in self.env.items()\n )\n command = \" \".join(['\"%s\"' % token for token in 
self.wrapper_command])\n with open(script_path, \"w\") as script_file:\n script_file.write(dedent(\n \"\"\"#!/bin/sh\n cd \"%s\"\n %s\n exec %s\n \"\"\" % (self.cwd, exported_environment, command)\n ))\n os.chmod(script_path, 0o744)\n return self.execute_process([self.terminal, \"-e\", script_path])\n\n def execute_process(self, command, env=None):\n \"\"\"Execute and return a subprocess\"\"\"\n if self.cwd and not system.path_exists(self.cwd):\n try:\n os.makedirs(self.cwd)\n except OSError:\n logger.error(\"Failed to create working directory, falling back to %s\",\n self.fallback_cwd)\n self.cwd = \"/tmp\"\n try:\n\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self.cwd,\n env=env,\n )\n except OSError as ex:\n logger.exception(\"Failed to execute %s: %s\", \" \".join(command), ex)\n self.error = ex.strerror\n\n def stop(self):\n \"\"\"Stops the current game process and cleans up the instance\"\"\"\n # Prevent stop() being called again by the process exiting\n self.prevent_on_stop = True\n\n try:\n self.game_process.terminate()\n except ProcessLookupError: # process already dead.\n logger.debug(\"Management process looks dead already.\")\n\n if hasattr(self, \"stop_func\"):\n resume_stop = self.stop_func()\n if not resume_stop:\n return False\n\n if self.stdout_monitor:\n logger.debug(\"Detaching logger\")\n GLib.source_remove(self.stdout_monitor)\n self.stdout_monitor = None\n else:\n logger.debug(\"logger already detached\")\n\n self.is_running = False\n self.ready_state = False\n return True\n\n\ndef exec_command(command):\n \"\"\"Execute arbitrary command in a MonitoredCommand\n\n Used by the --exec command line flag.\n \"\"\"\n command = MonitoredCommand(shlex.split(command), env=runtime.get_env())\n command.start()\n return command\n", "path": "lutris/command.py"}], "after_files": [{"content": "\"\"\"Threading module, used to launch games while monitoring them.\"\"\"\n\nimport io\nimport os\nimport sys\nimport fcntl\nimport shlex\nimport subprocess\nimport contextlib\nfrom textwrap import dedent\n\nfrom gi.repository import GLib\n\nfrom lutris import settings\nfrom lutris import runtime\nfrom lutris.util.log import logger\nfrom lutris.util import system\n\nWRAPPER_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"lutris-wrapper\")\n\n\nclass MonitoredCommand:\n \"\"\"Exexcutes a commmand while keeping track of its state\"\"\"\n\n fallback_cwd = \"/tmp\"\n\n def __init__(\n self,\n command,\n runner=None,\n env=None,\n term=None,\n cwd=None,\n include_processes=None,\n exclude_processes=None,\n log_buffer=None,\n title=None,\n ): # pylint: disable=too-many-arguments\n self.ready_state = True\n self.env = self.get_environment(env)\n\n self.command = command\n self.runner = runner\n self.stop_func = lambda: True\n self.game_process = None\n self.prevent_on_stop = False\n self.return_code = None\n self.terminal = system.find_executable(term)\n self.is_running = True\n self.error = None\n self.log_handlers = [\n self.log_handler_stdout,\n self.log_handler_console_output,\n ]\n self.set_log_buffer(log_buffer)\n self.stdout_monitor = None\n self.include_processes = include_processes or []\n self.exclude_processes = exclude_processes or []\n\n self.cwd = self.get_cwd(cwd)\n\n self._stdout = io.StringIO()\n\n self._title = title if title else command[0]\n\n @property\n def stdout(self):\n return self._stdout.getvalue()\n\n @property\n def wrapper_command(self):\n \"\"\"Return launch arguments for the wrapper 
script\"\"\"\n\n return [\n WRAPPER_SCRIPT,\n self._title,\n str(len(self.include_processes)),\n str(len(self.exclude_processes)),\n ] + self.include_processes + self.exclude_processes + self.command\n\n def set_log_buffer(self, log_buffer):\n \"\"\"Attach a TextBuffer to this command enables the buffer handler\"\"\"\n if not log_buffer:\n return\n self.log_buffer = log_buffer\n if self.log_handler_buffer not in self.log_handlers:\n self.log_handlers.append(self.log_handler_buffer)\n\n def get_cwd(self, cwd):\n \"\"\"Return the current working dir of the game\"\"\"\n if not cwd:\n cwd = self.runner.working_dir if self.runner else None\n return os.path.expanduser(cwd or \"~\")\n\n @staticmethod\n def get_environment(user_env):\n \"\"\"Process the user provided environment variables for use as self.env\"\"\"\n env = user_env or {}\n # not clear why this needs to be added, the path is already added in\n # the wrappper script.\n env['PYTHONPATH'] = ':'.join(sys.path)\n # Drop bad values of environment keys, those will confuse the Python\n # interpreter.\n return {\n key: value for key, value in env.items() if \"=\" not in key\n }\n\n def get_child_environment(self):\n \"\"\"Returns the calculated environment for the child process.\"\"\"\n env = os.environ.copy()\n env.update(self.env)\n return env\n\n def start(self):\n \"\"\"Run the thread.\"\"\"\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\n for key, value in self.env.items():\n logger.debug(\"ENV: %s=\\\"%s\\\"\", key, value)\n\n if self.terminal:\n self.game_process = self.run_in_terminal()\n else:\n env = self.get_child_environment()\n self.game_process = self.execute_process(self.wrapper_command, env)\n\n if not self.game_process:\n logger.warning(\"No game process available\")\n return\n\n GLib.child_watch_add(self.game_process.pid, self.on_stop)\n\n # make stdout nonblocking.\n fileno = self.game_process.stdout.fileno()\n fcntl.fcntl(\n fileno,\n fcntl.F_SETFL,\n fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK\n )\n\n self.stdout_monitor = GLib.io_add_watch(\n self.game_process.stdout,\n GLib.IO_IN | GLib.IO_HUP,\n self.on_stdout_output,\n )\n\n def log_handler_stdout(self, line):\n \"\"\"Add the line to this command's stdout attribute\"\"\"\n self._stdout.write(line)\n\n def log_handler_buffer(self, line):\n \"\"\"Add the line to the associated LogBuffer object\"\"\"\n self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)\n\n def log_handler_console_output(self, line): # pylint: disable=no-self-use\n \"\"\"Print the line to stdout\"\"\"\n with contextlib.suppress(BlockingIOError):\n sys.stdout.write(line)\n sys.stdout.flush()\n\n def on_stop(self, _pid, returncode):\n \"\"\"Callback registered on game process termination\"\"\"\n if self.prevent_on_stop: # stop() already in progress\n return False\n\n logger.debug(\"The process has terminated with code %s\", returncode)\n self.is_running = False\n self.return_code = returncode\n\n resume_stop = self.stop()\n if not resume_stop:\n logger.info(\"Full shutdown prevented\")\n return False\n\n return False\n\n def on_stdout_output(self, stdout, condition):\n \"\"\"Called by the stdout monitor to dispatch output to log handlers\"\"\"\n if condition == GLib.IO_HUP:\n self.stdout_monitor = None\n return False\n if not self.is_running:\n return False\n try:\n line = stdout.read(262144).decode(\"utf-8\", errors=\"ignore\")\n except ValueError:\n # file_desc might be closed\n return True\n if \"winemenubuilder.exe\" in line:\n return True\n for log_handler in 
self.log_handlers:\n log_handler(line)\n return True\n\n def run_in_terminal(self):\n \"\"\"Write command in a script file and run it.\n\n Running it from a file is likely the only way to set env vars only\n for the command (not for the terminal app).\n It's also the only reliable way to keep the term open when the\n game is quit.\n \"\"\"\n script_path = os.path.join(settings.CACHE_DIR, \"run_in_term.sh\")\n exported_environment = \"\\n\".join(\n 'export %s=\"%s\" ' % (key, value)\n for key, value in self.env.items()\n )\n command = \" \".join(['\"%s\"' % token for token in self.wrapper_command])\n with open(script_path, \"w\") as script_file:\n script_file.write(dedent(\n \"\"\"#!/bin/sh\n cd \"%s\"\n %s\n exec %s\n \"\"\" % (self.cwd, exported_environment, command)\n ))\n os.chmod(script_path, 0o744)\n return self.execute_process([self.terminal, \"-e\", script_path])\n\n def execute_process(self, command, env=None):\n \"\"\"Execute and return a subprocess\"\"\"\n if self.cwd and not system.path_exists(self.cwd):\n try:\n os.makedirs(self.cwd)\n except OSError:\n logger.error(\"Failed to create working directory, falling back to %s\",\n self.fallback_cwd)\n self.cwd = \"/tmp\"\n try:\n\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self.cwd,\n env=env,\n )\n except OSError as ex:\n logger.exception(\"Failed to execute %s: %s\", \" \".join(command), ex)\n self.error = ex.strerror\n\n def stop(self):\n \"\"\"Stops the current game process and cleans up the instance\"\"\"\n # Prevent stop() being called again by the process exiting\n self.prevent_on_stop = True\n\n try:\n self.game_process.terminate()\n except ProcessLookupError: # process already dead.\n logger.debug(\"Management process looks dead already.\")\n\n if hasattr(self, \"stop_func\"):\n resume_stop = self.stop_func()\n if not resume_stop:\n return False\n\n if self.stdout_monitor:\n logger.debug(\"Detaching logger\")\n GLib.source_remove(self.stdout_monitor)\n self.stdout_monitor = None\n else:\n logger.debug(\"logger already detached\")\n\n self.is_running = False\n self.ready_state = False\n return True\n\n\ndef exec_command(command):\n \"\"\"Execute arbitrary command in a MonitoredCommand\n\n Used by the --exec command line flag.\n \"\"\"\n command = MonitoredCommand(shlex.split(command), env=runtime.get_env())\n command.start()\n return command\n", "path": "lutris/command.py"}]}
| 3,126 | 91 |
gh_patches_debug_3335
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`mkdocs new` will overwrite an existing index.md file without warning.
If you run the command `mkdocs new /path/to/dir` and `/path/to/dir/docs/index.md` already exists, it will be replaced with our placeholder.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/new.py`
Content:
```
1 # coding: utf-8
2 from __future__ import print_function
3 import os
4
5 config_text = 'site_name: My Docs\n'
6 index_text = """# Welcome to MkDocs
7
8 For full documentation visit [mkdocs.org](http://mkdocs.org).
9
10 ## Commands
11
12 * `mkdocs new [dir-name]` - Create a new project.
13 * `mkdocs serve` - Start the live-reloading docs server.
14 * `mkdocs build` - Build the documentation site.
15 * `mkdocs help` - Print this help message.
16
17 ## Project layout
18
19 mkdocs.yml # The configuration file.
20 docs/
21 index.md # The documentation homepage.
22 ... # Other markdown pages, images and other files.
23 """
24
25
26 def new(args, options):
27 if len(args) != 1:
28 print("Usage 'mkdocs new [directory-name]'")
29 return
30
31 output_dir = args[0]
32
33 docs_dir = os.path.join(output_dir, 'docs')
34 config_path = os.path.join(output_dir, 'mkdocs.yml')
35 index_path = os.path.join(docs_dir, 'index.md')
36
37 if os.path.exists(config_path):
38 print('Project already exists.')
39 return
40
41 if not os.path.exists(output_dir):
42 print('Creating project directory: %s' % output_dir)
43 os.mkdir(output_dir)
44
45 print('Writing config file: %s' % config_path)
46 open(config_path, 'w').write(config_text)
47
48 print('Writing initial docs: %s' % index_path)
49 if not os.path.exists(docs_dir):
50 os.mkdir(docs_dir)
51 open(index_path, 'w').write(index_text)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/new.py b/mkdocs/new.py
--- a/mkdocs/new.py
+++ b/mkdocs/new.py
@@ -45,6 +45,9 @@
print('Writing config file: %s' % config_path)
open(config_path, 'w').write(config_text)
+ if os.path.exists(index_path):
+ return
+
print('Writing initial docs: %s' % index_path)
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
|
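Editor's note (not part of the dataset row): the patch leaves the existing `Project already exists.` check for `mkdocs.yml` in place and adds an early return before the docs are written, so a pre-existing `docs/index.md` is kept instead of being overwritten with the placeholder homepage. The guard pattern in isolation (paths are illustrative):

```python
import os

index_path = os.path.join("docs", "index.md")

if os.path.exists(index_path):
    # Preserve the user's existing homepage; only new projects get the placeholder.
    pass
else:
    os.makedirs(os.path.dirname(index_path), exist_ok=True)
    with open(index_path, "w") as fh:
        fh.write("# Welcome to MkDocs\n")
```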
{"golden_diff": "diff --git a/mkdocs/new.py b/mkdocs/new.py\n--- a/mkdocs/new.py\n+++ b/mkdocs/new.py\n@@ -45,6 +45,9 @@\n print('Writing config file: %s' % config_path)\n open(config_path, 'w').write(config_text)\n \n+ if os.path.exists(index_path):\n+ return\n+\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n", "issue": "`mkdocs new` will overwrite an existing index.md file without warning.\nIf you run the command: `mkdocs new /path/to/dir` and `/path/to/dir/docs/index.md` already exists it will be replaced with out placeholder.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\nimport os\n\nconfig_text = 'site_name: My Docs\\n'\nindex_text = \"\"\"# Welcome to MkDocs\n\nFor full documentation visit [mkdocs.org](http://mkdocs.org).\n\n## Commands\n\n* `mkdocs new [dir-name]` - Create a new project.\n* `mkdocs serve` - Start the live-reloading docs server.\n* `mkdocs build` - Build the documentation site.\n* `mkdocs help` - Print this help message.\n\n## Project layout\n\n mkdocs.yml # The configuration file.\n docs/\n index.md # The documentation homepage.\n ... # Other markdown pages, images and other files.\n\"\"\"\n\n\ndef new(args, options):\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n\n output_dir = args[0]\n\n docs_dir = os.path.join(output_dir, 'docs')\n config_path = os.path.join(output_dir, 'mkdocs.yml')\n index_path = os.path.join(docs_dir, 'index.md')\n\n if os.path.exists(config_path):\n print('Project already exists.')\n return\n\n if not os.path.exists(output_dir):\n print('Creating project directory: %s' % output_dir)\n os.mkdir(output_dir)\n\n print('Writing config file: %s' % config_path)\n open(config_path, 'w').write(config_text)\n\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n open(index_path, 'w').write(index_text)\n", "path": "mkdocs/new.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\nimport os\n\nconfig_text = 'site_name: My Docs\\n'\nindex_text = \"\"\"# Welcome to MkDocs\n\nFor full documentation visit [mkdocs.org](http://mkdocs.org).\n\n## Commands\n\n* `mkdocs new [dir-name]` - Create a new project.\n* `mkdocs serve` - Start the live-reloading docs server.\n* `mkdocs build` - Build the documentation site.\n* `mkdocs help` - Print this help message.\n\n## Project layout\n\n mkdocs.yml # The configuration file.\n docs/\n index.md # The documentation homepage.\n ... # Other markdown pages, images and other files.\n\"\"\"\n\n\ndef new(args, options):\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n\n output_dir = args[0]\n\n docs_dir = os.path.join(output_dir, 'docs')\n config_path = os.path.join(output_dir, 'mkdocs.yml')\n index_path = os.path.join(docs_dir, 'index.md')\n\n if os.path.exists(config_path):\n print('Project already exists.')\n return\n\n if not os.path.exists(output_dir):\n print('Creating project directory: %s' % output_dir)\n os.mkdir(output_dir)\n\n print('Writing config file: %s' % config_path)\n open(config_path, 'w').write(config_text)\n\n if os.path.exists(index_path):\n return\n\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n open(index_path, 'w').write(index_text)\n", "path": "mkdocs/new.py"}]}
| 763 | 114 |
gh_patches_debug_19605
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-849
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Randint is passed float bounds
I'm not sure what's causing this, but a user has this stack trace:
```
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 294, in run_command
rv.runner.CommandRunner.run(command_config_uri)
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
CommandRunner.run_from_proto(msg)
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
command.run()
File "/opt/src/rastervision/command/chip_command.py", line 29, in run
task.make_chips(train_scenes, val_scenes, augmentors, tmp_dir)
File "/opt/src/rastervision/task/task.py", line 127, in make_chips
train_scenes, TRAIN, augment=True)
File "/opt/src/rastervision/task/task.py", line 124, in _process_scenes
return [_process_scene(scene, type_, augment) for scene in scenes]
File "/opt/src/rastervision/task/task.py", line 124, in <listcomp>
return [_process_scene(scene, type_, augment) for scene in scenes]
File "/opt/src/rastervision/task/task.py", line 107, in _process_scene
windows = self.get_train_windows(scene)
File "/opt/src/rastervision/task/object_detection.py", line 107, in get_train_windows
self.config.chip_options.label_buffer))
File "/opt/src/rastervision/task/object_detection.py", line 58, in make_pos_windows
return _make_chip_pos_windows(image_extent, label_store, chip_size)
File "/opt/src/rastervision/task/object_detection.py", line 28, in _make_chip_pos_windows
window = box.make_random_square_container(chip_size)
File "/opt/src/rastervision/core/box.py", line 121, in make_random_square_container
rand_x = random.randint(lb, ub)
File "/opt/conda/lib/python3.6/random.py", line 221, in randint
return self.randrange(a, b+1)
File "/opt/conda/lib/python3.6/random.py", line 194, in randrange
raise ValueError("non-integer stop for randrange()")
ValueError: non-integer stop for randrange()
/tmp/tmpxwof2zeo/tmp_wgvukcn/Makefile:6: recipe for target '1' failed
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/core/box.py`
Content:
```
1 import math
2 import random
3
4 import numpy as np
5 from shapely.geometry import box as ShapelyBox
6
7
8 class BoxSizeError(ValueError):
9 pass
10
11
12 class Box():
13 """A multi-purpose box (ie. rectangle)."""
14
15 def __init__(self, ymin, xmin, ymax, xmax):
16 """Construct a bounding box.
17
18 Unless otherwise stated, the convention is that these coordinates are
19 in pixel coordinates and represent boxes that lie within a
20 RasterSource.
21
22 Args:
23 ymin: minimum y value (y is row)
24 xmin: minimum x value (x is column)
25 ymax: maximum y value
26 xmax: maximum x value
27
28 """
29 self.ymin = ymin
30 self.xmin = xmin
31 self.ymax = ymax
32 self.xmax = xmax
33
34 def __eq__(self, other):
35 """Return true if other has same coordinates."""
36 return self.tuple_format() == other.tuple_format()
37
38 def __ne__(self, other):
39 """Return true if other has different coordinates."""
40 return self.tuple_format() != other.tuple_format()
41
42 def get_height(self):
43 """Return height of Box."""
44 return self.ymax - self.ymin
45
46 def get_width(self):
47 """Return width of Box."""
48 return self.xmax - self.xmin
49
50 def get_area(self):
51 """Return area of Box."""
52 return self.get_height() * self.get_width()
53
54 def rasterio_format(self):
55 """Return Box in Rasterio format."""
56 return ((self.ymin, self.ymax), (self.xmin, self.xmax))
57
58 def tuple_format(self):
59 return (self.ymin, self.xmin, self.ymax, self.xmax)
60
61 def shapely_format(self):
62 return (self.xmin, self.ymin, self.xmax, self.ymax)
63
64 def to_int(self):
65 return Box(
66 int(self.ymin), int(self.xmin), int(self.ymax), int(self.xmax))
67
68 def npbox_format(self):
69 """Return Box in npbox format used by TF Object Detection API.
70
71 Returns:
72 Numpy array of form [ymin, xmin, ymax, xmax] with float type
73
74 """
75 return np.array(
76 [self.ymin, self.xmin, self.ymax, self.xmax], dtype=np.float)
77
78 @staticmethod
79 def to_npboxes(boxes):
80 """Return nx4 numpy array from list of Box."""
81 nb_boxes = len(boxes)
82 npboxes = np.empty((nb_boxes, 4))
83 for boxind, box in enumerate(boxes):
84 npboxes[boxind, :] = box.npbox_format()
85 return npboxes
86
87 def __str__(self): # pragma: no cover
88 return str(self.npbox_format())
89
90 def __repr__(self): # pragma: no cover
91 return str(self)
92
93 def geojson_coordinates(self):
94 """Return Box as GeoJSON coordinates."""
95 # Compass directions:
96 nw = [self.xmin, self.ymin]
97 ne = [self.xmin, self.ymax]
98 se = [self.xmax, self.ymax]
99 sw = [self.xmax, self.ymin]
100 return [nw, ne, se, sw, nw]
101
102 def make_random_square_container(self, size):
103 """Return a new square Box that contains this Box.
104
105 Args:
106 size: the width and height of the new Box
107
108 """
109 if size < self.get_width():
110 raise BoxSizeError('size of random container cannot be < width')
111
112 if size < self.get_height(): # pragma: no cover
113 raise BoxSizeError('size of random container cannot be < height')
114
115 lb = self.ymin - (size - self.get_height())
116 ub = self.ymin
117 rand_y = random.randint(lb, ub)
118
119 lb = self.xmin - (size - self.get_width())
120 ub = self.xmin
121 rand_x = random.randint(lb, ub)
122
123 return Box.make_square(rand_y, rand_x, size)
124
125 def make_random_square(self, size):
126 """Return new randomly positioned square Box that lies inside this Box.
127
128 Args:
129 size: the height and width of the new Box
130
131 """
132 if size >= self.get_width():
133 raise BoxSizeError('size of random square cannot be >= width')
134
135 if size >= self.get_height(): # pragma: no cover
136 raise BoxSizeError('size of random square cannot be >= height')
137
138 lb = self.ymin
139 ub = self.ymax - size
140 rand_y = random.randint(lb, ub)
141
142 lb = self.xmin
143 ub = self.xmax - size
144 rand_x = random.randint(lb, ub)
145
146 return Box.make_square(rand_y, rand_x, size)
147
148 def intersection(self, other):
149 """Return the intersection of this Box and the other.
150
151 Args:
152 other: The box to intersect with this one.
153
154 Returns:
155 The intersection of this box and the other one.
156
157 """
158 xmin = max(self.xmin, other.xmin)
159 ymin = max(self.ymin, other.ymin)
160 xmax = min(self.xmax, other.xmax)
161 ymax = min(self.ymax, other.ymax)
162 return Box(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)
163
164 @staticmethod
165 def from_npbox(npbox):
166 """Return new Box based on npbox format.
167
168 Args:
169 npbox: Numpy array of form [ymin, xmin, ymax, xmax] with float type
170
171 """
172 return Box(*npbox)
173
174 @staticmethod
175 def from_shapely(shape):
176 bounds = shape.bounds
177 return Box(bounds[1], bounds[0], bounds[3], bounds[2])
178
179 @staticmethod
180 def from_tuple(tup):
181 """Return new Box based on tuple format.
182
183 Args:
184 tup: Tuple format box (ymin, xmin, ymax, xmax)
185 """
186 return Box(tup[0], tup[1], tup[2], tup[3])
187
188 def to_shapely(self):
189 return ShapelyBox(*(self.shapely_format()))
190
191 def reproject(self, transform_fn):
192 """Reprojects this box based on a transform function.
193
194 Args:
195 transform_fn - A function that takes in a tuple (x, y)
196 and reprojects that point to the target
197 coordinate reference system.
198 """
199 (xmin, ymin) = transform_fn((self.xmin, self.ymin))
200 (xmax, ymax) = transform_fn((self.xmax, self.ymax))
201
202 return Box(ymin, xmin, ymax, xmax)
203
204 @staticmethod
205 def make_square(ymin, xmin, size):
206 """Return new square Box."""
207 return Box(ymin, xmin, ymin + size, xmin + size)
208
209 def make_eroded(self, erosion_size):
210 """Return new Box whose sides are eroded by erosion_size."""
211 return Box(self.ymin + erosion_size, self.xmin + erosion_size,
212 self.ymax - erosion_size, self.xmax - erosion_size)
213
214 def make_buffer(self, buffer_size, max_extent):
215 """Return new Box whose sides are buffered by buffer_size.
216
217 The resulting box is clipped so that the values of the corners are
218 always greater than zero and less than the height and width of
219 max_extent.
220
221 """
222 buffer_size = max(0., buffer_size)
223 if buffer_size < 1.:
224 delta_width = int(round(buffer_size * self.get_width()))
225 delta_height = int(round(buffer_size * self.get_height()))
226 else:
227 delta_height = delta_width = int(round(buffer_size))
228
229 return Box(
230 max(0, math.floor(self.ymin - delta_height)),
231 max(0, math.floor(self.xmin - delta_width)),
232 min(max_extent.get_height(),
233 int(self.ymax) + delta_height),
234 min(max_extent.get_width(),
235 int(self.xmax) + delta_width))
236
237 def make_copy(self):
238 return Box(*(self.tuple_format()))
239
240 def get_windows(self, chip_size, stride):
241 """Return list of grid of boxes within this box.
242
243 Args:
244 chip_size: (int) the length of each square-shaped window in pixels
245 stride: (int) how much each window is offset from the last in pixels
246
247 """
248 height = self.get_height()
249 width = self.get_width()
250
251 result = []
252 for row_start in range(0, height, stride):
253 for col_start in range(0, width, stride):
254 result.append(Box.make_square(row_start, col_start, chip_size))
255 return result
256
257 def to_dict(self):
258 return {
259 'xmin': self.xmin,
260 'ymin': self.ymin,
261 'xmax': self.xmax,
262 'ymax': self.ymax
263 }
264
265 @classmethod
266 def from_dict(cls, d):
267 return cls(d['ymin'], d['xmin'], d['ymax'], d['xmax'])
268
269 @staticmethod
270 def filter_by_aoi(windows, aoi_polygons):
271 """Filters windows by a list of AOI polygons"""
272 result = []
273 for window in windows:
274 w = window.to_shapely()
275 for polygon in aoi_polygons:
276 if w.within(polygon):
277 result.append(window)
278 break
279
280 return result
281
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rastervision/core/box.py b/rastervision/core/box.py
--- a/rastervision/core/box.py
+++ b/rastervision/core/box.py
@@ -114,11 +114,11 @@
lb = self.ymin - (size - self.get_height())
ub = self.ymin
- rand_y = random.randint(lb, ub)
+ rand_y = random.randint(int(lb), int(ub))
lb = self.xmin - (size - self.get_width())
ub = self.xmin
- rand_x = random.randint(lb, ub)
+ rand_x = random.randint(int(lb), int(ub))
return Box.make_square(rand_y, rand_x, size)
@@ -137,11 +137,11 @@
lb = self.ymin
ub = self.ymax - size
- rand_y = random.randint(lb, ub)
+ rand_y = random.randint(int(lb), int(ub))
lb = self.xmin
ub = self.xmax - size
- rand_x = random.randint(lb, ub)
+ rand_x = random.randint(int(lb), int(ub))
return Box.make_square(rand_y, rand_x, size)
|
{"golden_diff": "diff --git a/rastervision/core/box.py b/rastervision/core/box.py\n--- a/rastervision/core/box.py\n+++ b/rastervision/core/box.py\n@@ -114,11 +114,11 @@\n \n lb = self.ymin - (size - self.get_height())\n ub = self.ymin\n- rand_y = random.randint(lb, ub)\n+ rand_y = random.randint(int(lb), int(ub))\n \n lb = self.xmin - (size - self.get_width())\n ub = self.xmin\n- rand_x = random.randint(lb, ub)\n+ rand_x = random.randint(int(lb), int(ub))\n \n return Box.make_square(rand_y, rand_x, size)\n \n@@ -137,11 +137,11 @@\n \n lb = self.ymin\n ub = self.ymax - size\n- rand_y = random.randint(lb, ub)\n+ rand_y = random.randint(int(lb), int(ub))\n \n lb = self.xmin\n ub = self.xmax - size\n- rand_x = random.randint(lb, ub)\n+ rand_x = random.randint(int(lb), int(ub))\n \n return Box.make_square(rand_y, rand_x, size)\n", "issue": "Randint is passed float bounds\nI'm not sure what's causing this, but a user has this stack trace:\r\n\r\n```\r\nFile \"/opt/conda/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/opt/conda/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/opt/src/rastervision/__main__.py\", line 17, in <module>\r\n rv.main()\r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/opt/src/rastervision/cli/main.py\", line 294, in run_command\r\n rv.runner.CommandRunner.run(command_config_uri)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 11, in run\r\n CommandRunner.run_from_proto(msg)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 17, in run_from_proto\r\n command.run()\r\n File \"/opt/src/rastervision/command/chip_command.py\", line 29, in run\r\n task.make_chips(train_scenes, val_scenes, augmentors, tmp_dir)\r\n File \"/opt/src/rastervision/task/task.py\", line 127, in make_chips\r\n train_scenes, TRAIN, augment=True)\r\n File \"/opt/src/rastervision/task/task.py\", line 124, in _process_scenes\r\n return [_process_scene(scene, type_, augment) for scene in scenes]\r\n File \"/opt/src/rastervision/task/task.py\", line 124, in <listcomp>\r\n return [_process_scene(scene, type_, augment) for scene in scenes]\r\n File \"/opt/src/rastervision/task/task.py\", line 107, in _process_scene\r\n windows = self.get_train_windows(scene)\r\n File \"/opt/src/rastervision/task/object_detection.py\", line 107, in get_train_windows\r\n self.config.chip_options.label_buffer))\r\n File \"/opt/src/rastervision/task/object_detection.py\", line 58, in make_pos_windows\r\n return _make_chip_pos_windows(image_extent, label_store, chip_size)\r\n File \"/opt/src/rastervision/task/object_detection.py\", line 28, in _make_chip_pos_windows\r\n window = box.make_random_square_container(chip_size)\r\n File \"/opt/src/rastervision/core/box.py\", line 121, in make_random_square_container\r\n rand_x = random.randint(lb, ub)\r\n File \"/opt/conda/lib/python3.6/random.py\", line 221, in 
randint\r\n return self.randrange(a, b+1)\r\n File \"/opt/conda/lib/python3.6/random.py\", line 194, in randrange\r\n raise ValueError(\"non-integer stop for randrange()\")\r\nValueError: non-integer stop for randrange()\r\n/tmp/tmpxwof2zeo/tmp_wgvukcn/Makefile:6: recipe for target '1' failed\r\n```\n", "before_files": [{"content": "import math\nimport random\n\nimport numpy as np\nfrom shapely.geometry import box as ShapelyBox\n\n\nclass BoxSizeError(ValueError):\n pass\n\n\nclass Box():\n \"\"\"A multi-purpose box (ie. rectangle).\"\"\"\n\n def __init__(self, ymin, xmin, ymax, xmax):\n \"\"\"Construct a bounding box.\n\n Unless otherwise stated, the convention is that these coordinates are\n in pixel coordinates and represent boxes that lie within a\n RasterSource.\n\n Args:\n ymin: minimum y value (y is row)\n xmin: minimum x value (x is column)\n ymax: maximum y value\n xmax: maximum x value\n\n \"\"\"\n self.ymin = ymin\n self.xmin = xmin\n self.ymax = ymax\n self.xmax = xmax\n\n def __eq__(self, other):\n \"\"\"Return true if other has same coordinates.\"\"\"\n return self.tuple_format() == other.tuple_format()\n\n def __ne__(self, other):\n \"\"\"Return true if other has different coordinates.\"\"\"\n return self.tuple_format() != other.tuple_format()\n\n def get_height(self):\n \"\"\"Return height of Box.\"\"\"\n return self.ymax - self.ymin\n\n def get_width(self):\n \"\"\"Return width of Box.\"\"\"\n return self.xmax - self.xmin\n\n def get_area(self):\n \"\"\"Return area of Box.\"\"\"\n return self.get_height() * self.get_width()\n\n def rasterio_format(self):\n \"\"\"Return Box in Rasterio format.\"\"\"\n return ((self.ymin, self.ymax), (self.xmin, self.xmax))\n\n def tuple_format(self):\n return (self.ymin, self.xmin, self.ymax, self.xmax)\n\n def shapely_format(self):\n return (self.xmin, self.ymin, self.xmax, self.ymax)\n\n def to_int(self):\n return Box(\n int(self.ymin), int(self.xmin), int(self.ymax), int(self.xmax))\n\n def npbox_format(self):\n \"\"\"Return Box in npbox format used by TF Object Detection API.\n\n Returns:\n Numpy array of form [ymin, xmin, ymax, xmax] with float type\n\n \"\"\"\n return np.array(\n [self.ymin, self.xmin, self.ymax, self.xmax], dtype=np.float)\n\n @staticmethod\n def to_npboxes(boxes):\n \"\"\"Return nx4 numpy array from list of Box.\"\"\"\n nb_boxes = len(boxes)\n npboxes = np.empty((nb_boxes, 4))\n for boxind, box in enumerate(boxes):\n npboxes[boxind, :] = box.npbox_format()\n return npboxes\n\n def __str__(self): # pragma: no cover\n return str(self.npbox_format())\n\n def __repr__(self): # pragma: no cover\n return str(self)\n\n def geojson_coordinates(self):\n \"\"\"Return Box as GeoJSON coordinates.\"\"\"\n # Compass directions:\n nw = [self.xmin, self.ymin]\n ne = [self.xmin, self.ymax]\n se = [self.xmax, self.ymax]\n sw = [self.xmax, self.ymin]\n return [nw, ne, se, sw, nw]\n\n def make_random_square_container(self, size):\n \"\"\"Return a new square Box that contains this Box.\n\n Args:\n size: the width and height of the new Box\n\n \"\"\"\n if size < self.get_width():\n raise BoxSizeError('size of random container cannot be < width')\n\n if size < self.get_height(): # pragma: no cover\n raise BoxSizeError('size of random container cannot be < height')\n\n lb = self.ymin - (size - self.get_height())\n ub = self.ymin\n rand_y = random.randint(lb, ub)\n\n lb = self.xmin - (size - self.get_width())\n ub = self.xmin\n rand_x = random.randint(lb, ub)\n\n return Box.make_square(rand_y, rand_x, size)\n\n def make_random_square(self, 
size):\n \"\"\"Return new randomly positioned square Box that lies inside this Box.\n\n Args:\n size: the height and width of the new Box\n\n \"\"\"\n if size >= self.get_width():\n raise BoxSizeError('size of random square cannot be >= width')\n\n if size >= self.get_height(): # pragma: no cover\n raise BoxSizeError('size of random square cannot be >= height')\n\n lb = self.ymin\n ub = self.ymax - size\n rand_y = random.randint(lb, ub)\n\n lb = self.xmin\n ub = self.xmax - size\n rand_x = random.randint(lb, ub)\n\n return Box.make_square(rand_y, rand_x, size)\n\n def intersection(self, other):\n \"\"\"Return the intersection of this Box and the other.\n\n Args:\n other: The box to intersect with this one.\n\n Returns:\n The intersection of this box and the other one.\n\n \"\"\"\n xmin = max(self.xmin, other.xmin)\n ymin = max(self.ymin, other.ymin)\n xmax = min(self.xmax, other.xmax)\n ymax = min(self.ymax, other.ymax)\n return Box(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)\n\n @staticmethod\n def from_npbox(npbox):\n \"\"\"Return new Box based on npbox format.\n\n Args:\n npbox: Numpy array of form [ymin, xmin, ymax, xmax] with float type\n\n \"\"\"\n return Box(*npbox)\n\n @staticmethod\n def from_shapely(shape):\n bounds = shape.bounds\n return Box(bounds[1], bounds[0], bounds[3], bounds[2])\n\n @staticmethod\n def from_tuple(tup):\n \"\"\"Return new Box based on tuple format.\n\n Args:\n tup: Tuple format box (ymin, xmin, ymax, xmax)\n \"\"\"\n return Box(tup[0], tup[1], tup[2], tup[3])\n\n def to_shapely(self):\n return ShapelyBox(*(self.shapely_format()))\n\n def reproject(self, transform_fn):\n \"\"\"Reprojects this box based on a transform function.\n\n Args:\n transform_fn - A function that takes in a tuple (x, y)\n and reprojects that point to the target\n coordinate reference system.\n \"\"\"\n (xmin, ymin) = transform_fn((self.xmin, self.ymin))\n (xmax, ymax) = transform_fn((self.xmax, self.ymax))\n\n return Box(ymin, xmin, ymax, xmax)\n\n @staticmethod\n def make_square(ymin, xmin, size):\n \"\"\"Return new square Box.\"\"\"\n return Box(ymin, xmin, ymin + size, xmin + size)\n\n def make_eroded(self, erosion_size):\n \"\"\"Return new Box whose sides are eroded by erosion_size.\"\"\"\n return Box(self.ymin + erosion_size, self.xmin + erosion_size,\n self.ymax - erosion_size, self.xmax - erosion_size)\n\n def make_buffer(self, buffer_size, max_extent):\n \"\"\"Return new Box whose sides are buffered by buffer_size.\n\n The resulting box is clipped so that the values of the corners are\n always greater than zero and less than the height and width of\n max_extent.\n\n \"\"\"\n buffer_size = max(0., buffer_size)\n if buffer_size < 1.:\n delta_width = int(round(buffer_size * self.get_width()))\n delta_height = int(round(buffer_size * self.get_height()))\n else:\n delta_height = delta_width = int(round(buffer_size))\n\n return Box(\n max(0, math.floor(self.ymin - delta_height)),\n max(0, math.floor(self.xmin - delta_width)),\n min(max_extent.get_height(),\n int(self.ymax) + delta_height),\n min(max_extent.get_width(),\n int(self.xmax) + delta_width))\n\n def make_copy(self):\n return Box(*(self.tuple_format()))\n\n def get_windows(self, chip_size, stride):\n \"\"\"Return list of grid of boxes within this box.\n\n Args:\n chip_size: (int) the length of each square-shaped window in pixels\n stride: (int) how much each window is offset from the last in pixels\n\n \"\"\"\n height = self.get_height()\n width = self.get_width()\n\n result = []\n for row_start in range(0, height, 
stride):\n for col_start in range(0, width, stride):\n result.append(Box.make_square(row_start, col_start, chip_size))\n return result\n\n def to_dict(self):\n return {\n 'xmin': self.xmin,\n 'ymin': self.ymin,\n 'xmax': self.xmax,\n 'ymax': self.ymax\n }\n\n @classmethod\n def from_dict(cls, d):\n return cls(d['ymin'], d['xmin'], d['ymax'], d['xmax'])\n\n @staticmethod\n def filter_by_aoi(windows, aoi_polygons):\n \"\"\"Filters windows by a list of AOI polygons\"\"\"\n result = []\n for window in windows:\n w = window.to_shapely()\n for polygon in aoi_polygons:\n if w.within(polygon):\n result.append(window)\n break\n\n return result\n", "path": "rastervision/core/box.py"}], "after_files": [{"content": "import math\nimport random\n\nimport numpy as np\nfrom shapely.geometry import box as ShapelyBox\n\n\nclass BoxSizeError(ValueError):\n pass\n\n\nclass Box():\n \"\"\"A multi-purpose box (ie. rectangle).\"\"\"\n\n def __init__(self, ymin, xmin, ymax, xmax):\n \"\"\"Construct a bounding box.\n\n Unless otherwise stated, the convention is that these coordinates are\n in pixel coordinates and represent boxes that lie within a\n RasterSource.\n\n Args:\n ymin: minimum y value (y is row)\n xmin: minimum x value (x is column)\n ymax: maximum y value\n xmax: maximum x value\n\n \"\"\"\n self.ymin = ymin\n self.xmin = xmin\n self.ymax = ymax\n self.xmax = xmax\n\n def __eq__(self, other):\n \"\"\"Return true if other has same coordinates.\"\"\"\n return self.tuple_format() == other.tuple_format()\n\n def __ne__(self, other):\n \"\"\"Return true if other has different coordinates.\"\"\"\n return self.tuple_format() != other.tuple_format()\n\n def get_height(self):\n \"\"\"Return height of Box.\"\"\"\n return self.ymax - self.ymin\n\n def get_width(self):\n \"\"\"Return width of Box.\"\"\"\n return self.xmax - self.xmin\n\n def get_area(self):\n \"\"\"Return area of Box.\"\"\"\n return self.get_height() * self.get_width()\n\n def rasterio_format(self):\n \"\"\"Return Box in Rasterio format.\"\"\"\n return ((self.ymin, self.ymax), (self.xmin, self.xmax))\n\n def tuple_format(self):\n return (self.ymin, self.xmin, self.ymax, self.xmax)\n\n def shapely_format(self):\n return (self.xmin, self.ymin, self.xmax, self.ymax)\n\n def to_int(self):\n return Box(\n int(self.ymin), int(self.xmin), int(self.ymax), int(self.xmax))\n\n def npbox_format(self):\n \"\"\"Return Box in npbox format used by TF Object Detection API.\n\n Returns:\n Numpy array of form [ymin, xmin, ymax, xmax] with float type\n\n \"\"\"\n return np.array(\n [self.ymin, self.xmin, self.ymax, self.xmax], dtype=np.float)\n\n @staticmethod\n def to_npboxes(boxes):\n \"\"\"Return nx4 numpy array from list of Box.\"\"\"\n nb_boxes = len(boxes)\n npboxes = np.empty((nb_boxes, 4))\n for boxind, box in enumerate(boxes):\n npboxes[boxind, :] = box.npbox_format()\n return npboxes\n\n def __str__(self): # pragma: no cover\n return str(self.npbox_format())\n\n def __repr__(self): # pragma: no cover\n return str(self)\n\n def geojson_coordinates(self):\n \"\"\"Return Box as GeoJSON coordinates.\"\"\"\n # Compass directions:\n nw = [self.xmin, self.ymin]\n ne = [self.xmin, self.ymax]\n se = [self.xmax, self.ymax]\n sw = [self.xmax, self.ymin]\n return [nw, ne, se, sw, nw]\n\n def make_random_square_container(self, size):\n \"\"\"Return a new square Box that contains this Box.\n\n Args:\n size: the width and height of the new Box\n\n \"\"\"\n if size < self.get_width():\n raise BoxSizeError('size of random container cannot be < width')\n\n if size < 
self.get_height(): # pragma: no cover\n raise BoxSizeError('size of random container cannot be < height')\n\n lb = self.ymin - (size - self.get_height())\n ub = self.ymin\n rand_y = random.randint(int(lb), int(ub))\n\n lb = self.xmin - (size - self.get_width())\n ub = self.xmin\n rand_x = random.randint(int(lb), int(ub))\n\n return Box.make_square(rand_y, rand_x, size)\n\n def make_random_square(self, size):\n \"\"\"Return new randomly positioned square Box that lies inside this Box.\n\n Args:\n size: the height and width of the new Box\n\n \"\"\"\n if size >= self.get_width():\n raise BoxSizeError('size of random square cannot be >= width')\n\n if size >= self.get_height(): # pragma: no cover\n raise BoxSizeError('size of random square cannot be >= height')\n\n lb = self.ymin\n ub = self.ymax - size\n rand_y = random.randint(int(lb), int(ub))\n\n lb = self.xmin\n ub = self.xmax - size\n rand_x = random.randint(int(lb), int(ub))\n\n return Box.make_square(rand_y, rand_x, size)\n\n def intersection(self, other):\n \"\"\"Return the intersection of this Box and the other.\n\n Args:\n other: The box to intersect with this one.\n\n Returns:\n The intersection of this box and the other one.\n\n \"\"\"\n xmin = max(self.xmin, other.xmin)\n ymin = max(self.ymin, other.ymin)\n xmax = min(self.xmax, other.xmax)\n ymax = min(self.ymax, other.ymax)\n return Box(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)\n\n @staticmethod\n def from_npbox(npbox):\n \"\"\"Return new Box based on npbox format.\n\n Args:\n npbox: Numpy array of form [ymin, xmin, ymax, xmax] with float type\n\n \"\"\"\n return Box(*npbox)\n\n @staticmethod\n def from_shapely(shape):\n bounds = shape.bounds\n return Box(bounds[1], bounds[0], bounds[3], bounds[2])\n\n @staticmethod\n def from_tuple(tup):\n \"\"\"Return new Box based on tuple format.\n\n Args:\n tup: Tuple format box (ymin, xmin, ymax, xmax)\n \"\"\"\n return Box(tup[0], tup[1], tup[2], tup[3])\n\n def to_shapely(self):\n return ShapelyBox(*(self.shapely_format()))\n\n def reproject(self, transform_fn):\n \"\"\"Reprojects this box based on a transform function.\n\n Args:\n transform_fn - A function that takes in a tuple (x, y)\n and reprojects that point to the target\n coordinate reference system.\n \"\"\"\n (xmin, ymin) = transform_fn((self.xmin, self.ymin))\n (xmax, ymax) = transform_fn((self.xmax, self.ymax))\n\n return Box(ymin, xmin, ymax, xmax)\n\n @staticmethod\n def make_square(ymin, xmin, size):\n \"\"\"Return new square Box.\"\"\"\n return Box(ymin, xmin, ymin + size, xmin + size)\n\n def make_eroded(self, erosion_size):\n \"\"\"Return new Box whose sides are eroded by erosion_size.\"\"\"\n return Box(self.ymin + erosion_size, self.xmin + erosion_size,\n self.ymax - erosion_size, self.xmax - erosion_size)\n\n def make_buffer(self, buffer_size, max_extent):\n \"\"\"Return new Box whose sides are buffered by buffer_size.\n\n The resulting box is clipped so that the values of the corners are\n always greater than zero and less than the height and width of\n max_extent.\n\n \"\"\"\n buffer_size = max(0., buffer_size)\n if buffer_size < 1.:\n delta_width = int(round(buffer_size * self.get_width()))\n delta_height = int(round(buffer_size * self.get_height()))\n else:\n delta_height = delta_width = int(round(buffer_size))\n\n return Box(\n max(0, math.floor(self.ymin - delta_height)),\n max(0, math.floor(self.xmin - delta_width)),\n min(max_extent.get_height(),\n int(self.ymax) + delta_height),\n min(max_extent.get_width(),\n int(self.xmax) + delta_width))\n\n def 
make_copy(self):\n return Box(*(self.tuple_format()))\n\n def get_windows(self, chip_size, stride):\n \"\"\"Return list of grid of boxes within this box.\n\n Args:\n chip_size: (int) the length of each square-shaped window in pixels\n stride: (int) how much each window is offset from the last in pixels\n\n \"\"\"\n height = self.get_height()\n width = self.get_width()\n\n result = []\n for row_start in range(0, height, stride):\n for col_start in range(0, width, stride):\n result.append(Box.make_square(row_start, col_start, chip_size))\n return result\n\n def to_dict(self):\n return {\n 'xmin': self.xmin,\n 'ymin': self.ymin,\n 'xmax': self.xmax,\n 'ymax': self.ymax\n }\n\n @classmethod\n def from_dict(cls, d):\n return cls(d['ymin'], d['xmin'], d['ymax'], d['xmax'])\n\n @staticmethod\n def filter_by_aoi(windows, aoi_polygons):\n \"\"\"Filters windows by a list of AOI polygons\"\"\"\n result = []\n for window in windows:\n w = window.to_shapely()\n for polygon in aoi_polygons:\n if w.within(polygon):\n result.append(window)\n break\n\n return result\n", "path": "rastervision/core/box.py"}]}
| 3,915 | 291 |
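A minimal standalone sketch of the failure the golden diff above fixes: `random.randint` only accepts integral bounds, while the `Box` coordinates that reach it (for example `ymin` and `ymax - size` in `make_random_square`) can be non-integral floats. The bound values below are made-up illustrations, not taken from the dataset row; the exception type depends on the interpreter (a ValueError with the wording seen in the issue's Python 3.6 traceback, a TypeError on Python 3.11 and later), and truncating with `int()` as the patch does avoids both.

```python
import random

# Hypothetical float box bounds, standing in for ymin and ymax - size.
lb, ub = 10.0, 135.5

try:
    random.randint(lb, ub)
except (ValueError, TypeError) as err:
    # Python 3.6 (as in the traceback): ValueError "non-integer stop for randrange()"
    # Python 3.11+: TypeError, float bounds are no longer accepted at all
    print(type(err).__name__, err)

# The patched form truncates the bounds first, which always succeeds:
print(random.randint(int(lb), int(ub)))
```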
gh_patches_debug_25234
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-1947
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cupy.allclose` does not support comparison of complex-number arrays
As title. The reason is that in this line of the ufunc helper
https://github.com/cupy/cupy/blob/bb99716ffee178368ec71c875ace0553053cadc2/cupy/logic/comparison.py#L6
only `float16` (`e`), `float32` (`f`), and `float64` (`d`) arrays are included. Note that the NumPy counterpart does support comparing complex arrays using the same comparison logic, and I believe this can be easily patched by adding another ufunc helper for `complex64` and `complex128` arrays. PR to follow.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/logic/comparison.py`
Content:
```
1 from cupy import core
2
3
4 _is_close = core.create_ufunc(
5 'cupy_is_close',
6 ('eeee?->?', 'ffff?->?', 'dddd?->?'),
7 '''
8 bool equal_nan = in4;
9 if (isfinite(in0) && isfinite(in1)) {
10 out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1);
11 } else if (equal_nan) {
12 out0 = (in0 == in1) || (isnan(in0) && isnan(in1));
13 } else {
14 out0 = (in0 == in1);
15 }
16 '''
17 )
18
19
20 def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
21 """Returns True if two arrays are element-wise equal within a tolerance.
22
23 Two values in ``a`` and ``b`` are considiered equal when the following
24 equation is satisfied.
25
26 .. math::
27
28 |a - b| \\le \\mathrm{atol} + \\mathrm{rtol} |b|
29
30 Args:
31 a (cupy.ndarray): Input array to compare.
32 b (cupy.ndarray): Input array to compare.
33 rtol (float): The relative tolerance.
34 atol (float): The absolute tolerance.
35 equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal
36 to NaN's in ``b``.
37
38 Returns:
39 bool: if ``True``, two arrays are element-wise equal within a
40 tolerance.
41
42 .. seealso:: :func:`numpy.allclose`
43
44 """
45 res = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
46 return bool(res)
47
48
49 def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
50 """Returns a boolean array where two arrays are equal within a tolerance.
51
52 Two values in ``a`` and ``b`` are considiered equal when the following
53 equation is satisfied.
54
55 .. math::
56
57 |a - b| \\le \\mathrm{atol} + \\mathrm{rtol} |b|
58
59 Args:
60 a (cupy.ndarray): Input array to compare.
61 b (cupy.ndarray): Input array to compare.
62 rtol (float): The relative tolerance.
63 atol (float): The absolute tolerance.
64 equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal
65 to NaN's in ``b``.
66
67 Returns:
68 cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal.
69
70 .. seealso:: :func:`numpy.isclose`
71
72 """
73 return _is_close(a, b, rtol, atol, equal_nan)
74
75
76 # TODO(okuta): Implement array_equal
77
78
79 # TODO(okuta): Implement array_equiv
80
81
82 greater = core.greater
83
84
85 greater_equal = core.greater_equal
86
87
88 less = core.less
89
90
91 less_equal = core.less_equal
92
93
94 equal = core.equal
95
96
97 not_equal = core.not_equal
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupy/logic/comparison.py b/cupy/logic/comparison.py
--- a/cupy/logic/comparison.py
+++ b/cupy/logic/comparison.py
@@ -1,3 +1,6 @@
+import numpy
+
+import cupy
from cupy import core
@@ -16,6 +19,24 @@
'''
)
+# Note that in cupy/core/include/cupy/complex.cuh, we already got isfinite and
+# isnan working for complex numbers, so just replace fabs above by abs (from
+# thrust) and we are ready to go
+_is_close_complex = core.create_ufunc(
+ 'cupy_is_close_complex',
+ ('FFff?->?', 'DDdd?->?'),
+ '''
+ bool equal_nan = in4;
+ if (isfinite(in0) && isfinite(in1)) {
+ out0 = abs(in0 - in1) <= in3 + in2 * abs(in1);
+ } else if (equal_nan) {
+ out0 = (in0 == in1) || (isnan(in0) && isnan(in1));
+ } else {
+ out0 = (in0 == in1);
+ }
+ '''
+)
+
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""Returns True if two arrays are element-wise equal within a tolerance.
@@ -70,7 +91,13 @@
.. seealso:: :func:`numpy.isclose`
"""
- return _is_close(a, b, rtol, atol, equal_nan)
+ a = cupy.asanyarray(a)
+ b = cupy.asanyarray(b)
+ if (a.dtype in [numpy.complex64, numpy.complex128]) or \
+ (b.dtype in [numpy.complex64, numpy.complex128]):
+ return _is_close_complex(a, b, rtol, atol, equal_nan)
+ else:
+ return _is_close(a, b, rtol, atol, equal_nan)
# TODO(okuta): Implement array_equal
|
{"golden_diff": "diff --git a/cupy/logic/comparison.py b/cupy/logic/comparison.py\n--- a/cupy/logic/comparison.py\n+++ b/cupy/logic/comparison.py\n@@ -1,3 +1,6 @@\n+import numpy\n+\n+import cupy\n from cupy import core\n \n \n@@ -16,6 +19,24 @@\n '''\n )\n \n+# Note that in cupy/core/include/cupy/complex.cuh, we already got isfinite and\n+# isnan working for complex numbers, so just replace fabs above by abs (from\n+# thrust) and we are ready to go\n+_is_close_complex = core.create_ufunc(\n+ 'cupy_is_close_complex',\n+ ('FFff?->?', 'DDdd?->?'),\n+ '''\n+ bool equal_nan = in4;\n+ if (isfinite(in0) && isfinite(in1)) {\n+ out0 = abs(in0 - in1) <= in3 + in2 * abs(in1);\n+ } else if (equal_nan) {\n+ out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n+ } else {\n+ out0 = (in0 == in1);\n+ }\n+ '''\n+)\n+\n \n def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns True if two arrays are element-wise equal within a tolerance.\n@@ -70,7 +91,13 @@\n .. seealso:: :func:`numpy.isclose`\n \n \"\"\"\n- return _is_close(a, b, rtol, atol, equal_nan)\n+ a = cupy.asanyarray(a)\n+ b = cupy.asanyarray(b)\n+ if (a.dtype in [numpy.complex64, numpy.complex128]) or \\\n+ (b.dtype in [numpy.complex64, numpy.complex128]):\n+ return _is_close_complex(a, b, rtol, atol, equal_nan)\n+ else:\n+ return _is_close(a, b, rtol, atol, equal_nan)\n \n \n # TODO(okuta): Implement array_equal\n", "issue": "`cupy.allclose` does not support comparison of complex-number arrays\nAs title. The reason is that in this line of the ufunc helper \r\nhttps://github.com/cupy/cupy/blob/bb99716ffee178368ec71c875ace0553053cadc2/cupy/logic/comparison.py#L6\r\nonly `float16` (`e`), `float32` (`f`), and `float64` (`d`) arrays are included. Note that the NumPy counterpart does support comparing complex arrays using the same comparison logic, and I believe this can be easily patched by adding another ufunc helper for `complex64` and `complex128` arrays. PR to follow.\n", "before_files": [{"content": "from cupy import core\n\n\n_is_close = core.create_ufunc(\n 'cupy_is_close',\n ('eeee?->?', 'ffff?->?', 'dddd?->?'),\n '''\n bool equal_nan = in4;\n if (isfinite(in0) && isfinite(in1)) {\n out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1);\n } else if (equal_nan) {\n out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n } else {\n out0 = (in0 == in1);\n }\n '''\n)\n\n\ndef allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns True if two arrays are element-wise equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n bool: if ``True``, two arrays are element-wise equal within a\n tolerance.\n\n .. seealso:: :func:`numpy.allclose`\n\n \"\"\"\n res = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n return bool(res)\n\n\ndef isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns a boolean array where two arrays are equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. 
math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal.\n\n .. seealso:: :func:`numpy.isclose`\n\n \"\"\"\n return _is_close(a, b, rtol, atol, equal_nan)\n\n\n# TODO(okuta): Implement array_equal\n\n\n# TODO(okuta): Implement array_equiv\n\n\ngreater = core.greater\n\n\ngreater_equal = core.greater_equal\n\n\nless = core.less\n\n\nless_equal = core.less_equal\n\n\nequal = core.equal\n\n\nnot_equal = core.not_equal\n", "path": "cupy/logic/comparison.py"}], "after_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\n_is_close = core.create_ufunc(\n 'cupy_is_close',\n ('eeee?->?', 'ffff?->?', 'dddd?->?'),\n '''\n bool equal_nan = in4;\n if (isfinite(in0) && isfinite(in1)) {\n out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1);\n } else if (equal_nan) {\n out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n } else {\n out0 = (in0 == in1);\n }\n '''\n)\n\n# Note that in cupy/core/include/cupy/complex.cuh, we already got isfinite and\n# isnan working for complex numbers, so just replace fabs above by abs (from\n# thrust) and we are ready to go\n_is_close_complex = core.create_ufunc(\n 'cupy_is_close_complex',\n ('FFff?->?', 'DDdd?->?'),\n '''\n bool equal_nan = in4;\n if (isfinite(in0) && isfinite(in1)) {\n out0 = abs(in0 - in1) <= in3 + in2 * abs(in1);\n } else if (equal_nan) {\n out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n } else {\n out0 = (in0 == in1);\n }\n '''\n)\n\n\ndef allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns True if two arrays are element-wise equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n bool: if ``True``, two arrays are element-wise equal within a\n tolerance.\n\n .. seealso:: :func:`numpy.allclose`\n\n \"\"\"\n res = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n return bool(res)\n\n\ndef isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns a boolean array where two arrays are equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal.\n\n .. 
seealso:: :func:`numpy.isclose`\n\n \"\"\"\n a = cupy.asanyarray(a)\n b = cupy.asanyarray(b)\n if (a.dtype in [numpy.complex64, numpy.complex128]) or \\\n (b.dtype in [numpy.complex64, numpy.complex128]):\n return _is_close_complex(a, b, rtol, atol, equal_nan)\n else:\n return _is_close(a, b, rtol, atol, equal_nan)\n\n\n# TODO(okuta): Implement array_equal\n\n\n# TODO(okuta): Implement array_equiv\n\n\ngreater = core.greater\n\n\ngreater_equal = core.greater_equal\n\n\nless = core.less\n\n\nless_equal = core.less_equal\n\n\nequal = core.equal\n\n\nnot_equal = core.not_equal\n", "path": "cupy/logic/comparison.py"}]}
| 1,309 | 484 |
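The dtype restriction called out in the issue above is the whole problem: the closeness test |a - b| <= atol + rtol * |b| is already meaningful for complex values once abs() is read as the complex modulus, which is what the golden diff does on the GPU side by swapping fabs for abs (from thrust) and adding complex type signatures. Below is a small NumPy-only sketch of the reference CPU behaviour the patch is matching; the arrays are illustrative values, and `is_complex` is a hypothetical helper that mirrors the dtype dispatch the patch adds to `isclose`, not cupy code.

```python
import numpy as np

# NumPy applies |a - b| <= atol + rtol * |b| with abs() as the complex modulus,
# so complex inputs already work on the CPU side.
a = np.array([1 + 1j, 2 + 2j], dtype=np.complex64)
b = np.array([1 + 1.00000001j, 2 + 2j], dtype=np.complex64)

print(np.isclose(a, b))   # [ True  True]
print(np.allclose(a, b))  # True

def is_complex(x):
    # Hypothetical stand-in for the patch's dtype check (dtype kind 'c' = complex).
    return np.asarray(x).dtype.kind == "c"

print(is_complex(a), is_complex(np.ones(3)))  # True False
```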
gh_patches_debug_5841
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-7846
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problems with eSpeak NG in NVDA Next Snapshot Version next-15091,d89900a3
When using NVDA version next-15091,d89900a3, speech rate does not work how it should. At rate 100 without rate boost, NVDA speaks much slower than at rate 99. With rate boost, speech slows down above rate 18 and then seems to stay at the same rate regardless of where the slider is moved as long as you stay above rate 18. Words, especially those ending in vowel sounds, also sound as if they are cut off at the ends.
I noticed this first when I updated to this snapshot. During and after the update, speech seemed much slower than I'm used to. Once I noticed this, I tried changing the speech rate and found the results I mentioned above. To reproduce this, make sure you're using eSpeak NG and try changing the rate with and without rate boost. Pay particular attention to the rate values mentioned above.
I'm using NVDA version next-15091,d89900a3 on Windows 10 version 1803 build 17134.5. I also normally use the English America voice and the Klatt4 variant at rate 45 with rate boost turned on. I posted about this in the NVDA Skype group, and others seemed to be having the same issue when they looked into it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/synthDrivers/_espeak.py`
Content:
```
1 # -*- coding: UTF-8 -*-
2 #synthDrivers/_espeak.py
3 #A part of NonVisual Desktop Access (NVDA)
4 #Copyright (C) 2007-2012 NV Access Limited, Peter Vágner
5 #This file is covered by the GNU General Public License.
6 #See the file COPYING for more details.
7
8 import time
9 import nvwave
10 import threading
11 import Queue
12 from ctypes import *
13 import config
14 import globalVars
15 from logHandler import log
16 import os
17 import codecs
18
19 isSpeaking = False
20 lastIndex = None
21 bgThread=None
22 bgQueue = None
23 player = None
24 espeakDLL=None
25
26 #Parameter bounds
27 minRate=80
28 maxRate=450
29 minPitch=0
30 maxPitch=99
31
32 #event types
33 espeakEVENT_LIST_TERMINATED=0
34 espeakEVENT_WORD=1
35 espeakEVENT_SENTENCE=2
36 espeakEVENT_MARK=3
37 espeakEVENT_PLAY=4
38 espeakEVENT_END=5
39 espeakEVENT_MSG_TERMINATED=6
40 espeakEVENT_PHONEME=7
41
42 #position types
43 POS_CHARACTER=1
44 POS_WORD=2
45 POS_SENTENCE=3
46
47 #output types
48 AUDIO_OUTPUT_PLAYBACK=0
49 AUDIO_OUTPUT_RETRIEVAL=1
50 AUDIO_OUTPUT_SYNCHRONOUS=2
51 AUDIO_OUTPUT_SYNCH_PLAYBACK=3
52
53 #synth flags
54 espeakCHARS_AUTO=0
55 espeakCHARS_UTF8=1
56 espeakCHARS_8BIT=2
57 espeakCHARS_WCHAR=3
58 espeakSSML=0x10
59 espeakPHONEMES=0x100
60 espeakENDPAUSE=0x1000
61 espeakKEEP_NAMEDATA=0x2000
62
63 #speech parameters
64 espeakSILENCE=0
65 espeakRATE=1
66 espeakVOLUME=2
67 espeakPITCH=3
68 espeakRANGE=4
69 espeakPUNCTUATION=5
70 espeakCAPITALS=6
71 espeakWORDGAP=7
72 espeakOPTIONS=8 # reserved for misc. options. not yet used
73 espeakINTONATION=9
74 espeakRESERVED1=10
75 espeakRESERVED2=11
76
77 #error codes
78 EE_OK=0
79 #EE_INTERNAL_ERROR=-1
80 #EE_BUFFER_FULL=1
81 #EE_NOT_FOUND=2
82
83 class espeak_EVENT_id(Union):
84 _fields_=[
85 ('number',c_int),
86 ('name',c_char_p),
87 ('string',c_char*8),
88 ]
89
90 class espeak_EVENT(Structure):
91 _fields_=[
92 ('type',c_int),
93 ('unique_identifier',c_uint),
94 ('text_position',c_int),
95 ('length',c_int),
96 ('audio_position',c_int),
97 ('sample',c_int),
98 ('user_data',c_void_p),
99 ('id',espeak_EVENT_id),
100 ]
101
102 class espeak_VOICE(Structure):
103 _fields_=[
104 ('name',c_char_p),
105 ('languages',c_char_p),
106 ('identifier',c_char_p),
107 ('gender',c_byte),
108 ('age',c_byte),
109 ('variant',c_byte),
110 ('xx1',c_byte),
111 ('score',c_int),
112 ('spare',c_void_p),
113 ]
114
115 def __eq__(self, other):
116 return isinstance(other, type(self)) and addressof(self) == addressof(other)
117
118 t_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT))
119
120 @t_espeak_callback
121 def callback(wav,numsamples,event):
122 try:
123 global player, isSpeaking, lastIndex
124 if not isSpeaking:
125 return 1
126 for e in event:
127 if e.type==espeakEVENT_MARK:
128 lastIndex=int(e.id.name)
129 elif e.type==espeakEVENT_LIST_TERMINATED:
130 break
131 if not wav:
132 player.idle()
133 isSpeaking = False
134 return 0
135 if numsamples > 0:
136 try:
137 player.feed(string_at(wav, numsamples * sizeof(c_short)))
138 except:
139 log.debugWarning("Error feeding audio to nvWave",exc_info=True)
140 return 0
141 except:
142 log.error("callback", exc_info=True)
143
144 class BgThread(threading.Thread):
145 def __init__(self):
146 threading.Thread.__init__(self)
147 self.setDaemon(True)
148
149 def run(self):
150 global isSpeaking
151 while True:
152 func, args, kwargs = bgQueue.get()
153 if not func:
154 break
155 try:
156 func(*args, **kwargs)
157 except:
158 log.error("Error running function from queue", exc_info=True)
159 bgQueue.task_done()
160
161 def _execWhenDone(func, *args, **kwargs):
162 global bgQueue
163 # This can't be a kwarg in the function definition because it will consume the first non-keywor dargument which is meant for func.
164 mustBeAsync = kwargs.pop("mustBeAsync", False)
165 if mustBeAsync or bgQueue.unfinished_tasks != 0:
166 # Either this operation must be asynchronous or There is still an operation in progress.
167 # Therefore, run this asynchronously in the background thread.
168 bgQueue.put((func, args, kwargs))
169 else:
170 func(*args, **kwargs)
171
172 def _speak(text):
173 global isSpeaking
174 uniqueID=c_int()
175 isSpeaking = True
176 flags = espeakCHARS_WCHAR | espeakSSML | espeakPHONEMES
177 return espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)
178
179 def speak(text):
180 global bgQueue
181 _execWhenDone(_speak, text, mustBeAsync=True)
182
183 def stop():
184 global isSpeaking, bgQueue, lastIndex
185 # Kill all speech from now.
186 # We still want parameter changes to occur, so requeue them.
187 params = []
188 try:
189 while True:
190 item = bgQueue.get_nowait()
191 if item[0] != _speak:
192 params.append(item)
193 bgQueue.task_done()
194 except Queue.Empty:
195 # Let the exception break us out of this loop, as queue.empty() is not reliable anyway.
196 pass
197 for item in params:
198 bgQueue.put(item)
199 isSpeaking = False
200 player.stop()
201 lastIndex=None
202
203 def pause(switch):
204 global player
205 player.pause(switch)
206
207 def setParameter(param,value,relative):
208 _execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative)
209
210 def getParameter(param,current):
211 return espeakDLL.espeak_GetParameter(param,current)
212
213 def getVoiceList():
214 voices=espeakDLL.espeak_ListVoices(None)
215 voiceList=[]
216 for voice in voices:
217 if not voice: break
218 voiceList.append(voice.contents)
219 return voiceList
220
221 def getCurrentVoice():
222 voice = espeakDLL.espeak_GetCurrentVoice()
223 if voice:
224 return voice.contents
225 else:
226 return None
227
228 def setVoice(voice):
229 # For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception.
230 setVoiceByName(voice.identifier)
231
232 def setVoiceByName(name):
233 _execWhenDone(espeakDLL.espeak_SetVoiceByName,name)
234
235 def _setVoiceAndVariant(voice=None, variant=None):
236 res = getCurrentVoice().identifier.split("+")
237 if not voice:
238 voice = res[0]
239 if not variant:
240 if len(res) == 2:
241 variant = res[1]
242 else:
243 variant = "none"
244 if variant == "none":
245 espeakDLL.espeak_SetVoiceByName(voice)
246 else:
247 try:
248 espeakDLL.espeak_SetVoiceByName("%s+%s" % (voice, variant))
249 except:
250 espeakDLL.espeak_SetVoiceByName(voice)
251
252 def setVoiceAndVariant(voice=None, variant=None):
253 _execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant)
254
255 def _setVoiceByLanguage(lang):
256 v=espeak_VOICE()
257 lang=lang.replace('_','-')
258 v.languages=lang
259 try:
260 espeakDLL.espeak_SetVoiceByProperties(byref(v))
261 except:
262 v.languages="en"
263 espeakDLL.espeak_SetVoiceByProperties(byref(v))
264
265 def setVoiceByLanguage(lang):
266 _execWhenDone(_setVoiceByLanguage, lang)
267
268 def espeak_errcheck(res, func, args):
269 if res != EE_OK:
270 raise RuntimeError("%s: code %d" % (func.__name__, res))
271 return res
272
273 def initialize():
274 global espeakDLL, bgThread, bgQueue, player
275 espeakDLL=cdll.LoadLibrary(r"synthDrivers\espeak.dll")
276 espeakDLL.espeak_Info.restype=c_char_p
277 espeakDLL.espeak_Synth.errcheck=espeak_errcheck
278 espeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck
279 espeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck
280 espeakDLL.espeak_SetParameter.errcheck=espeak_errcheck
281 espeakDLL.espeak_Terminate.errcheck=espeak_errcheck
282 espeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE))
283 espeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE)
284 espeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,)
285 sampleRate=espeakDLL.espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,300,
286 os.path.abspath("synthDrivers"),0)
287 if sampleRate<0:
288 raise OSError("espeak_Initialize %d"%sampleRate)
289 player = nvwave.WavePlayer(channels=1, samplesPerSec=sampleRate, bitsPerSample=16, outputDevice=config.conf["speech"]["outputDevice"])
290 espeakDLL.espeak_SetSynthCallback(callback)
291 bgQueue = Queue.Queue()
292 bgThread=BgThread()
293 bgThread.start()
294
295 def terminate():
296 global bgThread, bgQueue, player, espeakDLL
297 stop()
298 bgQueue.put((None, None, None))
299 bgThread.join()
300 espeakDLL.espeak_Terminate()
301 bgThread=None
302 bgQueue=None
303 player.close()
304 player=None
305 espeakDLL=None
306
307 def info():
308 return espeakDLL.espeak_Info()
309
310 def getVariantDict():
311 dir='synthDrivers\\espeak-ng-data\\voices\\!v'
312 # Translators: name of the default espeak varient.
313 variantDict={"none": pgettext("espeakVarient", "none")}
314 for fileName in os.listdir(dir):
315 if os.path.isfile("%s\\%s"%(dir,fileName)):
316 file=codecs.open("%s\\%s"%(dir,fileName))
317 for line in file:
318 if line.startswith('name '):
319 temp=line.split(" ")
320 if len(temp) ==2:
321 name=temp[1].rstrip()
322 break
323 name=None
324 file.close()
325 if name is not None:
326 variantDict[fileName]=name
327 return variantDict
328
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/synthDrivers/_espeak.py b/source/synthDrivers/_espeak.py
--- a/source/synthDrivers/_espeak.py
+++ b/source/synthDrivers/_espeak.py
@@ -173,7 +173,9 @@
global isSpeaking
uniqueID=c_int()
isSpeaking = True
- flags = espeakCHARS_WCHAR | espeakSSML | espeakPHONEMES
+ # eSpeak can only process compound emojis when using a UTF8 encoding
+ text=text.encode('utf8',errors='ignore')
+ flags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES
return espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)
def speak(text):
|
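The diff above replaces eSpeak's wide-character input flag with `espeakCHARS_UTF8` and encodes the text on the Python side first, per its own comment about compound emoji. A short Python 3 sketch of just that encode step; the emoji string is a made-up example, and nothing here calls the eSpeak library itself.

```python
# A compound emoji is several code points joined by U+200D (zero-width joiner);
# encoding to UTF-8 with errors='ignore' yields the byte string that the patch
# passes to espeak_Synth together with the espeakCHARS_UTF8 flag.
text = "family: \U0001F468\u200D\U0001F469\u200D\U0001F467"
payload = text.encode("utf8", errors="ignore")

print(len(text), "code points ->", len(payload), "UTF-8 bytes")
print(payload[:12])  # b'family: \xf0\x9f\x91\xa8'
```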
{"golden_diff": "diff --git a/source/synthDrivers/_espeak.py b/source/synthDrivers/_espeak.py\n--- a/source/synthDrivers/_espeak.py\n+++ b/source/synthDrivers/_espeak.py\n@@ -173,7 +173,9 @@\n \tglobal isSpeaking\r\n \tuniqueID=c_int()\r\n \tisSpeaking = True\r\n-\tflags = espeakCHARS_WCHAR | espeakSSML | espeakPHONEMES\r\n+\t# eSpeak can only process compound emojis when using a UTF8 encoding\r\n+\ttext=text.encode('utf8',errors='ignore')\r\n+\tflags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES\r\n \treturn espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)\r\n \r\n def speak(text):\n", "issue": "Problems with eSpeak NG in NVDA Next Snapshot Version next-15091,d89900a3\nWhen using NVDA version next-15091,d89900a3, speech rate does not work how it should. At rate 100 without rate boost, NVDA speaks much slower than at rate 99. With rate boost, speech slows down above rate 18 and then seems to stay at the same rate regardless of where the slider is moved as long as you stay above rate 18. Words, especially those ending in vowel sounds, also sound as if they are cut off at the ends.\r\nI noticed this first when I updated to this snapshot. During and after the update, speech seemed much slower than I'm used to. Once I noticed this, I tried changing the speech rate and found the results I mentioned above. To reproduce this, make sure you're using eSpeak NG and try changing the rate with and without rate boost. Pay particular attention to the rate values mentioned above.\r\nI'm using NVDA version next-15091,d89900a3 on Windows 10 version 1803 build 17134.5. I also normally use the English America voice and the Klatt4 variant at rate 45 with rate boost turned on. I posted about this in the NVDA Skype group, and others seemed to be having the same issue when they looked into it.\n", "before_files": [{"content": "# -*- coding: UTF-8 -*-\r\n#synthDrivers/_espeak.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2007-2012 NV Access Limited, Peter V\u00e1gner\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport time\r\nimport nvwave\r\nimport threading\r\nimport Queue\r\nfrom ctypes import *\r\nimport config\r\nimport globalVars\r\nfrom logHandler import log\r\nimport os\r\nimport codecs\r\n\r\nisSpeaking = False\r\nlastIndex = None\r\nbgThread=None\r\nbgQueue = None\r\nplayer = None\r\nespeakDLL=None\r\n\r\n#Parameter bounds\r\nminRate=80\r\nmaxRate=450\r\nminPitch=0\r\nmaxPitch=99\r\n\r\n#event types\r\nespeakEVENT_LIST_TERMINATED=0\r\nespeakEVENT_WORD=1\r\nespeakEVENT_SENTENCE=2\r\nespeakEVENT_MARK=3\r\nespeakEVENT_PLAY=4\r\nespeakEVENT_END=5\r\nespeakEVENT_MSG_TERMINATED=6\r\nespeakEVENT_PHONEME=7\r\n\r\n#position types\r\nPOS_CHARACTER=1\r\nPOS_WORD=2\r\nPOS_SENTENCE=3\r\n\r\n#output types\r\nAUDIO_OUTPUT_PLAYBACK=0\r\nAUDIO_OUTPUT_RETRIEVAL=1\r\nAUDIO_OUTPUT_SYNCHRONOUS=2\r\nAUDIO_OUTPUT_SYNCH_PLAYBACK=3\r\n\r\n#synth flags\r\nespeakCHARS_AUTO=0\r\nespeakCHARS_UTF8=1\r\nespeakCHARS_8BIT=2\r\nespeakCHARS_WCHAR=3\r\nespeakSSML=0x10\r\nespeakPHONEMES=0x100\r\nespeakENDPAUSE=0x1000\r\nespeakKEEP_NAMEDATA=0x2000\r\n\r\n#speech parameters\r\nespeakSILENCE=0\r\nespeakRATE=1\r\nespeakVOLUME=2\r\nespeakPITCH=3\r\nespeakRANGE=4\r\nespeakPUNCTUATION=5\r\nespeakCAPITALS=6\r\nespeakWORDGAP=7\r\nespeakOPTIONS=8 # reserved for misc. options. 
not yet used\r\nespeakINTONATION=9\r\nespeakRESERVED1=10\r\nespeakRESERVED2=11\r\n\r\n#error codes\r\nEE_OK=0\r\n#EE_INTERNAL_ERROR=-1\r\n#EE_BUFFER_FULL=1\r\n#EE_NOT_FOUND=2\r\n\r\nclass espeak_EVENT_id(Union):\r\n\t_fields_=[\r\n\t\t('number',c_int),\r\n\t\t('name',c_char_p),\r\n\t\t('string',c_char*8),\r\n\t]\r\n\r\nclass espeak_EVENT(Structure):\r\n\t_fields_=[\r\n\t\t('type',c_int),\r\n\t\t('unique_identifier',c_uint),\r\n\t\t('text_position',c_int),\r\n\t\t('length',c_int),\r\n\t\t('audio_position',c_int),\r\n\t\t('sample',c_int),\r\n\t\t('user_data',c_void_p),\r\n\t\t('id',espeak_EVENT_id),\r\n\t]\r\n\r\nclass espeak_VOICE(Structure):\r\n\t_fields_=[\r\n\t\t('name',c_char_p),\r\n\t\t('languages',c_char_p),\r\n\t\t('identifier',c_char_p),\r\n\t\t('gender',c_byte),\r\n\t\t('age',c_byte),\r\n\t\t('variant',c_byte),\r\n\t\t('xx1',c_byte),\r\n\t\t('score',c_int),\r\n\t\t('spare',c_void_p),\r\n\t]\r\n\r\n\tdef __eq__(self, other):\r\n\t\treturn isinstance(other, type(self)) and addressof(self) == addressof(other)\r\n\r\nt_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT))\r\n\r\n@t_espeak_callback\r\ndef callback(wav,numsamples,event):\r\n\ttry:\r\n\t\tglobal player, isSpeaking, lastIndex\r\n\t\tif not isSpeaking:\r\n\t\t\treturn 1\r\n\t\tfor e in event:\r\n\t\t\tif e.type==espeakEVENT_MARK:\r\n\t\t\t\tlastIndex=int(e.id.name)\r\n\t\t\telif e.type==espeakEVENT_LIST_TERMINATED:\r\n\t\t\t\tbreak\r\n\t\tif not wav:\r\n\t\t\tplayer.idle()\r\n\t\t\tisSpeaking = False\r\n\t\t\treturn 0\r\n\t\tif numsamples > 0:\r\n\t\t\ttry:\r\n\t\t\t\tplayer.feed(string_at(wav, numsamples * sizeof(c_short)))\r\n\t\t\texcept:\r\n\t\t\t\tlog.debugWarning(\"Error feeding audio to nvWave\",exc_info=True)\r\n\t\treturn 0\r\n\texcept:\r\n\t\tlog.error(\"callback\", exc_info=True)\r\n\r\nclass BgThread(threading.Thread):\r\n\tdef __init__(self):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.setDaemon(True)\r\n\r\n\tdef run(self):\r\n\t\tglobal isSpeaking\r\n\t\twhile True:\r\n\t\t\tfunc, args, kwargs = bgQueue.get()\r\n\t\t\tif not func:\r\n\t\t\t\tbreak\r\n\t\t\ttry:\r\n\t\t\t\tfunc(*args, **kwargs)\r\n\t\t\texcept:\r\n\t\t\t\tlog.error(\"Error running function from queue\", exc_info=True)\r\n\t\t\tbgQueue.task_done()\r\n\r\ndef _execWhenDone(func, *args, **kwargs):\r\n\tglobal bgQueue\r\n\t# This can't be a kwarg in the function definition because it will consume the first non-keywor dargument which is meant for func.\r\n\tmustBeAsync = kwargs.pop(\"mustBeAsync\", False)\r\n\tif mustBeAsync or bgQueue.unfinished_tasks != 0:\r\n\t\t# Either this operation must be asynchronous or There is still an operation in progress.\r\n\t\t# Therefore, run this asynchronously in the background thread.\r\n\t\tbgQueue.put((func, args, kwargs))\r\n\telse:\r\n\t\tfunc(*args, **kwargs)\r\n\r\ndef _speak(text):\r\n\tglobal isSpeaking\r\n\tuniqueID=c_int()\r\n\tisSpeaking = True\r\n\tflags = espeakCHARS_WCHAR | espeakSSML | espeakPHONEMES\r\n\treturn espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)\r\n\r\ndef speak(text):\r\n\tglobal bgQueue\r\n\t_execWhenDone(_speak, text, mustBeAsync=True)\r\n\r\ndef stop():\r\n\tglobal isSpeaking, bgQueue, lastIndex\r\n\t# Kill all speech from now.\r\n\t# We still want parameter changes to occur, so requeue them.\r\n\tparams = []\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\titem = bgQueue.get_nowait()\r\n\t\t\tif item[0] != _speak:\r\n\t\t\t\tparams.append(item)\r\n\t\t\tbgQueue.task_done()\r\n\texcept Queue.Empty:\r\n\t\t# Let the exception break us out of this 
loop, as queue.empty() is not reliable anyway.\r\n\t\tpass\r\n\tfor item in params:\r\n\t\tbgQueue.put(item)\r\n\tisSpeaking = False\r\n\tplayer.stop()\r\n\tlastIndex=None\r\n\r\ndef pause(switch):\r\n\tglobal player\r\n\tplayer.pause(switch)\r\n\r\ndef setParameter(param,value,relative):\r\n\t_execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative)\r\n\r\ndef getParameter(param,current):\r\n\treturn espeakDLL.espeak_GetParameter(param,current)\r\n\r\ndef getVoiceList():\r\n\tvoices=espeakDLL.espeak_ListVoices(None)\r\n\tvoiceList=[]\r\n\tfor voice in voices:\r\n\t\tif not voice: break\r\n\t\tvoiceList.append(voice.contents)\r\n\treturn voiceList\r\n\r\ndef getCurrentVoice():\r\n\tvoice = espeakDLL.espeak_GetCurrentVoice()\r\n\tif voice:\r\n\t\treturn voice.contents\r\n\telse:\r\n\t\treturn None\r\n\r\ndef setVoice(voice):\r\n\t# For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception.\r\n\tsetVoiceByName(voice.identifier)\r\n\r\ndef setVoiceByName(name):\r\n\t_execWhenDone(espeakDLL.espeak_SetVoiceByName,name)\r\n\r\ndef _setVoiceAndVariant(voice=None, variant=None):\r\n\tres = getCurrentVoice().identifier.split(\"+\")\r\n\tif not voice:\r\n\t\tvoice = res[0]\r\n\tif not variant:\r\n\t\tif len(res) == 2:\r\n\t\t\tvariant = res[1]\r\n\t\telse:\r\n\t\t\tvariant = \"none\"\r\n\tif variant == \"none\":\r\n\t\tespeakDLL.espeak_SetVoiceByName(voice)\r\n\telse:\r\n\t\ttry:\r\n\t\t\tespeakDLL.espeak_SetVoiceByName(\"%s+%s\" % (voice, variant))\r\n\t\texcept:\r\n\t\t\tespeakDLL.espeak_SetVoiceByName(voice)\r\n\r\ndef setVoiceAndVariant(voice=None, variant=None):\r\n\t_execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant)\r\n\r\ndef _setVoiceByLanguage(lang):\r\n\tv=espeak_VOICE()\r\n\tlang=lang.replace('_','-')\r\n\tv.languages=lang\r\n\ttry:\r\n\t\tespeakDLL.espeak_SetVoiceByProperties(byref(v))\r\n\texcept:\r\n\t\tv.languages=\"en\"\r\n\t\tespeakDLL.espeak_SetVoiceByProperties(byref(v))\r\n\r\ndef setVoiceByLanguage(lang):\r\n\t_execWhenDone(_setVoiceByLanguage, lang)\r\n\r\ndef espeak_errcheck(res, func, args):\r\n\tif res != EE_OK:\r\n\t\traise RuntimeError(\"%s: code %d\" % (func.__name__, res))\r\n\treturn res\r\n\r\ndef initialize():\r\n\tglobal espeakDLL, bgThread, bgQueue, player\r\n\tespeakDLL=cdll.LoadLibrary(r\"synthDrivers\\espeak.dll\")\r\n\tespeakDLL.espeak_Info.restype=c_char_p\r\n\tespeakDLL.espeak_Synth.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetParameter.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_Terminate.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE))\r\n\tespeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE)\r\n\tespeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,)\r\n\tsampleRate=espeakDLL.espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,300,\r\n\t\tos.path.abspath(\"synthDrivers\"),0)\r\n\tif sampleRate<0:\r\n\t\traise OSError(\"espeak_Initialize %d\"%sampleRate)\r\n\tplayer = nvwave.WavePlayer(channels=1, samplesPerSec=sampleRate, bitsPerSample=16, outputDevice=config.conf[\"speech\"][\"outputDevice\"])\r\n\tespeakDLL.espeak_SetSynthCallback(callback)\r\n\tbgQueue = Queue.Queue()\r\n\tbgThread=BgThread()\r\n\tbgThread.start()\r\n\r\ndef terminate():\r\n\tglobal bgThread, bgQueue, player, espeakDLL \r\n\tstop()\r\n\tbgQueue.put((None, None, 
None))\r\n\tbgThread.join()\r\n\tespeakDLL.espeak_Terminate()\r\n\tbgThread=None\r\n\tbgQueue=None\r\n\tplayer.close()\r\n\tplayer=None\r\n\tespeakDLL=None\r\n\r\ndef info():\r\n\treturn espeakDLL.espeak_Info()\r\n\r\ndef getVariantDict():\r\n\tdir='synthDrivers\\\\espeak-ng-data\\\\voices\\\\!v'\r\n\t# Translators: name of the default espeak varient.\r\n\tvariantDict={\"none\": pgettext(\"espeakVarient\", \"none\")}\r\n\tfor fileName in os.listdir(dir):\r\n\t\tif os.path.isfile(\"%s\\\\%s\"%(dir,fileName)):\r\n\t\t\tfile=codecs.open(\"%s\\\\%s\"%(dir,fileName))\r\n\t\t\tfor line in file:\r\n\t\t\t\tif line.startswith('name '):\r\n\t\t\t\t\ttemp=line.split(\" \")\r\n\t\t\t\t\tif len(temp) ==2:\r\n\t\t\t\t\t\tname=temp[1].rstrip()\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tname=None\r\n\t\t\tfile.close()\r\n\t\tif name is not None:\r\n\t\t\tvariantDict[fileName]=name\r\n\treturn variantDict\r\n\r\n", "path": "source/synthDrivers/_espeak.py"}], "after_files": [{"content": "# -*- coding: UTF-8 -*-\r\n#synthDrivers/_espeak.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2007-2012 NV Access Limited, Peter V\u00e1gner\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport time\r\nimport nvwave\r\nimport threading\r\nimport Queue\r\nfrom ctypes import *\r\nimport config\r\nimport globalVars\r\nfrom logHandler import log\r\nimport os\r\nimport codecs\r\n\r\nisSpeaking = False\r\nlastIndex = None\r\nbgThread=None\r\nbgQueue = None\r\nplayer = None\r\nespeakDLL=None\r\n\r\n#Parameter bounds\r\nminRate=80\r\nmaxRate=450\r\nminPitch=0\r\nmaxPitch=99\r\n\r\n#event types\r\nespeakEVENT_LIST_TERMINATED=0\r\nespeakEVENT_WORD=1\r\nespeakEVENT_SENTENCE=2\r\nespeakEVENT_MARK=3\r\nespeakEVENT_PLAY=4\r\nespeakEVENT_END=5\r\nespeakEVENT_MSG_TERMINATED=6\r\nespeakEVENT_PHONEME=7\r\n\r\n#position types\r\nPOS_CHARACTER=1\r\nPOS_WORD=2\r\nPOS_SENTENCE=3\r\n\r\n#output types\r\nAUDIO_OUTPUT_PLAYBACK=0\r\nAUDIO_OUTPUT_RETRIEVAL=1\r\nAUDIO_OUTPUT_SYNCHRONOUS=2\r\nAUDIO_OUTPUT_SYNCH_PLAYBACK=3\r\n\r\n#synth flags\r\nespeakCHARS_AUTO=0\r\nespeakCHARS_UTF8=1\r\nespeakCHARS_8BIT=2\r\nespeakCHARS_WCHAR=3\r\nespeakSSML=0x10\r\nespeakPHONEMES=0x100\r\nespeakENDPAUSE=0x1000\r\nespeakKEEP_NAMEDATA=0x2000\r\n\r\n#speech parameters\r\nespeakSILENCE=0\r\nespeakRATE=1\r\nespeakVOLUME=2\r\nespeakPITCH=3\r\nespeakRANGE=4\r\nespeakPUNCTUATION=5\r\nespeakCAPITALS=6\r\nespeakWORDGAP=7\r\nespeakOPTIONS=8 # reserved for misc. options. 
not yet used\r\nespeakINTONATION=9\r\nespeakRESERVED1=10\r\nespeakRESERVED2=11\r\n\r\n#error codes\r\nEE_OK=0\r\n#EE_INTERNAL_ERROR=-1\r\n#EE_BUFFER_FULL=1\r\n#EE_NOT_FOUND=2\r\n\r\nclass espeak_EVENT_id(Union):\r\n\t_fields_=[\r\n\t\t('number',c_int),\r\n\t\t('name',c_char_p),\r\n\t\t('string',c_char*8),\r\n\t]\r\n\r\nclass espeak_EVENT(Structure):\r\n\t_fields_=[\r\n\t\t('type',c_int),\r\n\t\t('unique_identifier',c_uint),\r\n\t\t('text_position',c_int),\r\n\t\t('length',c_int),\r\n\t\t('audio_position',c_int),\r\n\t\t('sample',c_int),\r\n\t\t('user_data',c_void_p),\r\n\t\t('id',espeak_EVENT_id),\r\n\t]\r\n\r\nclass espeak_VOICE(Structure):\r\n\t_fields_=[\r\n\t\t('name',c_char_p),\r\n\t\t('languages',c_char_p),\r\n\t\t('identifier',c_char_p),\r\n\t\t('gender',c_byte),\r\n\t\t('age',c_byte),\r\n\t\t('variant',c_byte),\r\n\t\t('xx1',c_byte),\r\n\t\t('score',c_int),\r\n\t\t('spare',c_void_p),\r\n\t]\r\n\r\n\tdef __eq__(self, other):\r\n\t\treturn isinstance(other, type(self)) and addressof(self) == addressof(other)\r\n\r\nt_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT))\r\n\r\n@t_espeak_callback\r\ndef callback(wav,numsamples,event):\r\n\ttry:\r\n\t\tglobal player, isSpeaking, lastIndex\r\n\t\tif not isSpeaking:\r\n\t\t\treturn 1\r\n\t\tfor e in event:\r\n\t\t\tif e.type==espeakEVENT_MARK:\r\n\t\t\t\tlastIndex=int(e.id.name)\r\n\t\t\telif e.type==espeakEVENT_LIST_TERMINATED:\r\n\t\t\t\tbreak\r\n\t\tif not wav:\r\n\t\t\tplayer.idle()\r\n\t\t\tisSpeaking = False\r\n\t\t\treturn 0\r\n\t\tif numsamples > 0:\r\n\t\t\ttry:\r\n\t\t\t\tplayer.feed(string_at(wav, numsamples * sizeof(c_short)))\r\n\t\t\texcept:\r\n\t\t\t\tlog.debugWarning(\"Error feeding audio to nvWave\",exc_info=True)\r\n\t\treturn 0\r\n\texcept:\r\n\t\tlog.error(\"callback\", exc_info=True)\r\n\r\nclass BgThread(threading.Thread):\r\n\tdef __init__(self):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.setDaemon(True)\r\n\r\n\tdef run(self):\r\n\t\tglobal isSpeaking\r\n\t\twhile True:\r\n\t\t\tfunc, args, kwargs = bgQueue.get()\r\n\t\t\tif not func:\r\n\t\t\t\tbreak\r\n\t\t\ttry:\r\n\t\t\t\tfunc(*args, **kwargs)\r\n\t\t\texcept:\r\n\t\t\t\tlog.error(\"Error running function from queue\", exc_info=True)\r\n\t\t\tbgQueue.task_done()\r\n\r\ndef _execWhenDone(func, *args, **kwargs):\r\n\tglobal bgQueue\r\n\t# This can't be a kwarg in the function definition because it will consume the first non-keywor dargument which is meant for func.\r\n\tmustBeAsync = kwargs.pop(\"mustBeAsync\", False)\r\n\tif mustBeAsync or bgQueue.unfinished_tasks != 0:\r\n\t\t# Either this operation must be asynchronous or There is still an operation in progress.\r\n\t\t# Therefore, run this asynchronously in the background thread.\r\n\t\tbgQueue.put((func, args, kwargs))\r\n\telse:\r\n\t\tfunc(*args, **kwargs)\r\n\r\ndef _speak(text):\r\n\tglobal isSpeaking\r\n\tuniqueID=c_int()\r\n\tisSpeaking = True\r\n\t# eSpeak can only process compound emojis when using a UTF8 encoding\r\n\ttext=text.encode('utf8',errors='ignore')\r\n\tflags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES\r\n\treturn espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)\r\n\r\ndef speak(text):\r\n\tglobal bgQueue\r\n\t_execWhenDone(_speak, text, mustBeAsync=True)\r\n\r\ndef stop():\r\n\tglobal isSpeaking, bgQueue, lastIndex\r\n\t# Kill all speech from now.\r\n\t# We still want parameter changes to occur, so requeue them.\r\n\tparams = []\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\titem = bgQueue.get_nowait()\r\n\t\t\tif item[0] != 
_speak:\r\n\t\t\t\tparams.append(item)\r\n\t\t\tbgQueue.task_done()\r\n\texcept Queue.Empty:\r\n\t\t# Let the exception break us out of this loop, as queue.empty() is not reliable anyway.\r\n\t\tpass\r\n\tfor item in params:\r\n\t\tbgQueue.put(item)\r\n\tisSpeaking = False\r\n\tplayer.stop()\r\n\tlastIndex=None\r\n\r\ndef pause(switch):\r\n\tglobal player\r\n\tplayer.pause(switch)\r\n\r\ndef setParameter(param,value,relative):\r\n\t_execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative)\r\n\r\ndef getParameter(param,current):\r\n\treturn espeakDLL.espeak_GetParameter(param,current)\r\n\r\ndef getVoiceList():\r\n\tvoices=espeakDLL.espeak_ListVoices(None)\r\n\tvoiceList=[]\r\n\tfor voice in voices:\r\n\t\tif not voice: break\r\n\t\tvoiceList.append(voice.contents)\r\n\treturn voiceList\r\n\r\ndef getCurrentVoice():\r\n\tvoice = espeakDLL.espeak_GetCurrentVoice()\r\n\tif voice:\r\n\t\treturn voice.contents\r\n\telse:\r\n\t\treturn None\r\n\r\ndef setVoice(voice):\r\n\t# For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception.\r\n\tsetVoiceByName(voice.identifier)\r\n\r\ndef setVoiceByName(name):\r\n\t_execWhenDone(espeakDLL.espeak_SetVoiceByName,name)\r\n\r\ndef _setVoiceAndVariant(voice=None, variant=None):\r\n\tres = getCurrentVoice().identifier.split(\"+\")\r\n\tif not voice:\r\n\t\tvoice = res[0]\r\n\tif not variant:\r\n\t\tif len(res) == 2:\r\n\t\t\tvariant = res[1]\r\n\t\telse:\r\n\t\t\tvariant = \"none\"\r\n\tif variant == \"none\":\r\n\t\tespeakDLL.espeak_SetVoiceByName(voice)\r\n\telse:\r\n\t\ttry:\r\n\t\t\tespeakDLL.espeak_SetVoiceByName(\"%s+%s\" % (voice, variant))\r\n\t\texcept:\r\n\t\t\tespeakDLL.espeak_SetVoiceByName(voice)\r\n\r\ndef setVoiceAndVariant(voice=None, variant=None):\r\n\t_execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant)\r\n\r\ndef _setVoiceByLanguage(lang):\r\n\tv=espeak_VOICE()\r\n\tlang=lang.replace('_','-')\r\n\tv.languages=lang\r\n\ttry:\r\n\t\tespeakDLL.espeak_SetVoiceByProperties(byref(v))\r\n\texcept:\r\n\t\tv.languages=\"en\"\r\n\t\tespeakDLL.espeak_SetVoiceByProperties(byref(v))\r\n\r\ndef setVoiceByLanguage(lang):\r\n\t_execWhenDone(_setVoiceByLanguage, lang)\r\n\r\ndef espeak_errcheck(res, func, args):\r\n\tif res != EE_OK:\r\n\t\traise RuntimeError(\"%s: code %d\" % (func.__name__, res))\r\n\treturn res\r\n\r\ndef initialize():\r\n\tglobal espeakDLL, bgThread, bgQueue, player\r\n\tespeakDLL=cdll.LoadLibrary(r\"synthDrivers\\espeak.dll\")\r\n\tespeakDLL.espeak_Info.restype=c_char_p\r\n\tespeakDLL.espeak_Synth.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_SetParameter.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_Terminate.errcheck=espeak_errcheck\r\n\tespeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE))\r\n\tespeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE)\r\n\tespeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,)\r\n\tsampleRate=espeakDLL.espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,300,\r\n\t\tos.path.abspath(\"synthDrivers\"),0)\r\n\tif sampleRate<0:\r\n\t\traise OSError(\"espeak_Initialize %d\"%sampleRate)\r\n\tplayer = nvwave.WavePlayer(channels=1, samplesPerSec=sampleRate, bitsPerSample=16, outputDevice=config.conf[\"speech\"][\"outputDevice\"])\r\n\tespeakDLL.espeak_SetSynthCallback(callback)\r\n\tbgQueue = Queue.Queue()\r\n\tbgThread=BgThread()\r\n\tbgThread.start()\r\n\r\ndef terminate():\r\n\tglobal bgThread, bgQueue, 
player, espeakDLL \r\n\tstop()\r\n\tbgQueue.put((None, None, None))\r\n\tbgThread.join()\r\n\tespeakDLL.espeak_Terminate()\r\n\tbgThread=None\r\n\tbgQueue=None\r\n\tplayer.close()\r\n\tplayer=None\r\n\tespeakDLL=None\r\n\r\ndef info():\r\n\treturn espeakDLL.espeak_Info()\r\n\r\ndef getVariantDict():\r\n\tdir='synthDrivers\\\\espeak-ng-data\\\\voices\\\\!v'\r\n\t# Translators: name of the default espeak varient.\r\n\tvariantDict={\"none\": pgettext(\"espeakVarient\", \"none\")}\r\n\tfor fileName in os.listdir(dir):\r\n\t\tif os.path.isfile(\"%s\\\\%s\"%(dir,fileName)):\r\n\t\t\tfile=codecs.open(\"%s\\\\%s\"%(dir,fileName))\r\n\t\t\tfor line in file:\r\n\t\t\t\tif line.startswith('name '):\r\n\t\t\t\t\ttemp=line.split(\" \")\r\n\t\t\t\t\tif len(temp) ==2:\r\n\t\t\t\t\t\tname=temp[1].rstrip()\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tname=None\r\n\t\t\tfile.close()\r\n\t\tif name is not None:\r\n\t\t\tvariantDict[fileName]=name\r\n\treturn variantDict\r\n\r\n", "path": "source/synthDrivers/_espeak.py"}]}
| 3,966 | 183 |
gh_patches_debug_5142
|
rasdani/github-patches
|
git_diff
|
microsoft__qlib-1246
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Misleading Error "Please install necessary libs for CatBoostModel."
## 🐛 Bug Description
Qlib does not require the installation of packages like `CatBoostModel`.
But the output looks a little misleading.
## To Reproduce
Run `examples/workflow_by_code.ipynb` in jupyter notebook.
## Expected Behavior
Successfully run the script without installing CatBoostModel and without the warning.
## Screenshot

<!-- A screenshot of the error message or anything shouldn't appear-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qlib/contrib/model/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT License.
3 try:
4 from .catboost_model import CatBoostModel
5 except ModuleNotFoundError:
6 CatBoostModel = None
7 print("Please install necessary libs for CatBoostModel.")
8 try:
9 from .double_ensemble import DEnsembleModel
10 from .gbdt import LGBModel
11 except ModuleNotFoundError:
12 DEnsembleModel, LGBModel = None, None
13 print(
14 "ModuleNotFoundError. DEnsembleModel and LGBModel are skipped. (optional: maybe installing lightgbm can fix it.)"
15 )
16 try:
17 from .xgboost import XGBModel
18 except ModuleNotFoundError:
19 XGBModel = None
20 print("ModuleNotFoundError. XGBModel is skipped(optional: maybe installing xgboost can fix it).")
21 try:
22 from .linear import LinearModel
23 except ModuleNotFoundError:
24 LinearModel = None
25 print("ModuleNotFoundError. LinearModel is skipped(optional: maybe installing scipy and sklearn can fix it).")
26 # import pytorch models
27 try:
28 from .pytorch_alstm import ALSTM
29 from .pytorch_gats import GATs
30 from .pytorch_gru import GRU
31 from .pytorch_lstm import LSTM
32 from .pytorch_nn import DNNModelPytorch
33 from .pytorch_tabnet import TabnetModel
34 from .pytorch_sfm import SFM_Model
35 from .pytorch_tcn import TCN
36 from .pytorch_add import ADD
37
38 pytorch_classes = (ALSTM, GATs, GRU, LSTM, DNNModelPytorch, TabnetModel, SFM_Model, TCN, ADD)
39 except ModuleNotFoundError:
40 pytorch_classes = ()
41 print("ModuleNotFoundError. PyTorch models are skipped (optional: maybe installing pytorch can fix it).")
42
43 all_model_classes = (CatBoostModel, DEnsembleModel, LGBModel, XGBModel, LinearModel) + pytorch_classes
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qlib/contrib/model/__init__.py b/qlib/contrib/model/__init__.py
--- a/qlib/contrib/model/__init__.py
+++ b/qlib/contrib/model/__init__.py
@@ -4,7 +4,7 @@
from .catboost_model import CatBoostModel
except ModuleNotFoundError:
CatBoostModel = None
- print("Please install necessary libs for CatBoostModel.")
+ print("ModuleNotFoundError. CatBoostModel are skipped. (optional: maybe installing CatBoostModel can fix it.)")
try:
from .double_ensemble import DEnsembleModel
from .gbdt import LGBModel
|
{"golden_diff": "diff --git a/qlib/contrib/model/__init__.py b/qlib/contrib/model/__init__.py\n--- a/qlib/contrib/model/__init__.py\n+++ b/qlib/contrib/model/__init__.py\n@@ -4,7 +4,7 @@\n from .catboost_model import CatBoostModel\n except ModuleNotFoundError:\n CatBoostModel = None\n- print(\"Please install necessary libs for CatBoostModel.\")\n+ print(\"ModuleNotFoundError. CatBoostModel are skipped. (optional: maybe installing CatBoostModel can fix it.)\")\n try:\n from .double_ensemble import DEnsembleModel\n from .gbdt import LGBModel\n", "issue": "Misleading Error \"Please install necessary libs for CatBoostModel.\"\n## \ud83d\udc1b Bug Description\r\n\r\nQlib does not require the installation of packages like `CatBoostModel`\r\n\r\nBut the output looks a little misleading.\r\n\r\n\r\n## To Reproduce\r\nRun `examples/workflow_by_code.ipynb` in jupyter notebook.\r\n\r\n## Expected Behavior\r\n\r\nSuccessfully run the script without installing CatBoostModel and warning.\r\n\r\n## Screenshot\r\n\r\n\r\n<!-- A screenshot of the error message or anything shouldn't appear-->\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\ntry:\n from .catboost_model import CatBoostModel\nexcept ModuleNotFoundError:\n CatBoostModel = None\n print(\"Please install necessary libs for CatBoostModel.\")\ntry:\n from .double_ensemble import DEnsembleModel\n from .gbdt import LGBModel\nexcept ModuleNotFoundError:\n DEnsembleModel, LGBModel = None, None\n print(\n \"ModuleNotFoundError. DEnsembleModel and LGBModel are skipped. (optional: maybe installing lightgbm can fix it.)\"\n )\ntry:\n from .xgboost import XGBModel\nexcept ModuleNotFoundError:\n XGBModel = None\n print(\"ModuleNotFoundError. XGBModel is skipped(optional: maybe installing xgboost can fix it).\")\ntry:\n from .linear import LinearModel\nexcept ModuleNotFoundError:\n LinearModel = None\n print(\"ModuleNotFoundError. LinearModel is skipped(optional: maybe installing scipy and sklearn can fix it).\")\n# import pytorch models\ntry:\n from .pytorch_alstm import ALSTM\n from .pytorch_gats import GATs\n from .pytorch_gru import GRU\n from .pytorch_lstm import LSTM\n from .pytorch_nn import DNNModelPytorch\n from .pytorch_tabnet import TabnetModel\n from .pytorch_sfm import SFM_Model\n from .pytorch_tcn import TCN\n from .pytorch_add import ADD\n\n pytorch_classes = (ALSTM, GATs, GRU, LSTM, DNNModelPytorch, TabnetModel, SFM_Model, TCN, ADD)\nexcept ModuleNotFoundError:\n pytorch_classes = ()\n print(\"ModuleNotFoundError. PyTorch models are skipped (optional: maybe installing pytorch can fix it).\")\n\nall_model_classes = (CatBoostModel, DEnsembleModel, LGBModel, XGBModel, LinearModel) + pytorch_classes\n", "path": "qlib/contrib/model/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\ntry:\n from .catboost_model import CatBoostModel\nexcept ModuleNotFoundError:\n CatBoostModel = None\n print(\"ModuleNotFoundError. CatBoostModel are skipped. (optional: maybe installing CatBoostModel can fix it.)\")\ntry:\n from .double_ensemble import DEnsembleModel\n from .gbdt import LGBModel\nexcept ModuleNotFoundError:\n DEnsembleModel, LGBModel = None, None\n print(\n \"ModuleNotFoundError. DEnsembleModel and LGBModel are skipped. (optional: maybe installing lightgbm can fix it.)\"\n )\ntry:\n from .xgboost import XGBModel\nexcept ModuleNotFoundError:\n XGBModel = None\n print(\"ModuleNotFoundError. 
XGBModel is skipped(optional: maybe installing xgboost can fix it).\")\ntry:\n from .linear import LinearModel\nexcept ModuleNotFoundError:\n LinearModel = None\n print(\"ModuleNotFoundError. LinearModel is skipped(optional: maybe installing scipy and sklearn can fix it).\")\n# import pytorch models\ntry:\n from .pytorch_alstm import ALSTM\n from .pytorch_gats import GATs\n from .pytorch_gru import GRU\n from .pytorch_lstm import LSTM\n from .pytorch_nn import DNNModelPytorch\n from .pytorch_tabnet import TabnetModel\n from .pytorch_sfm import SFM_Model\n from .pytorch_tcn import TCN\n from .pytorch_add import ADD\n\n pytorch_classes = (ALSTM, GATs, GRU, LSTM, DNNModelPytorch, TabnetModel, SFM_Model, TCN, ADD)\nexcept ModuleNotFoundError:\n pytorch_classes = ()\n print(\"ModuleNotFoundError. PyTorch models are skipped (optional: maybe installing pytorch can fix it).\")\n\nall_model_classes = (CatBoostModel, DEnsembleModel, LGBModel, XGBModel, LinearModel) + pytorch_classes\n", "path": "qlib/contrib/model/__init__.py"}]}
| 934 | 147 |
gh_patches_debug_5703
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-806
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adapt catalog models to the new Product-Position hierarchy. stb2
Created the new hierarchy at https://github.com/fidals/refarm-site/issues/261
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/models.py`
Content:
```
1 import enum
2 import random
3 import string
4 import typing
5 from uuid import uuid4
6
7 from django.conf import settings
8 from django.db import models
9 from django.urls import reverse
10 from django.utils.translation import ugettext_lazy as _
11
12 from catalog import models as catalog_models
13 from ecommerce import models as ecommerce_models
14 from pages import models as pages_models
15
16
17 def randomize_slug(slug: str) -> str:
18 slug_hash = ''.join(
19 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
20 )
21 return f'{slug}_{slug_hash}'
22
23
24 class SECategoryQuerySet(catalog_models.CategoryQuerySet):
25 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
26 categories_with_pictures = (
27 self
28 .filter(products__page__images__isnull=False)
29 .distinct()
30 )
31
32 return categories_with_pictures.get_ancestors(include_self=True)
33
34
35 class SECategoryManager(
36 catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)
37 ):
38 pass
39
40
41 class Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):
42
43 objects = SECategoryManager()
44 uuid = models.UUIDField(default=uuid4, editable=False)
45
46 @classmethod
47 def get_default_parent(cls):
48 return pages_models.CustomPage.objects.filter(slug='catalog').first()
49
50 @property
51 def image(self):
52 products = self.products.all()
53 return products[0].image if products else None
54
55 def get_absolute_url(self):
56 return reverse('category', args=(self.page.slug,))
57
58
59 class Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):
60
61 # That's why we are needed to explicitly add objects manager here
62 # because of Django special managers behaviour.
63 # Se se#480 for details.
64 objects = catalog_models.ProductManager()
65
66 category = models.ForeignKey(
67 Category,
68 on_delete=models.CASCADE,
69 null=True,
70 related_name='products',
71 verbose_name=_('category'),
72 )
73
74 tags = models.ManyToManyField(
75 'Tag',
76 related_name='products',
77 blank=True,
78 verbose_name=_('tags'),
79 )
80
81 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
82 uuid = models.UUIDField(default=uuid4, editable=False)
83 purchase_price = models.FloatField(
84 default=0, verbose_name=_('purchase_price'))
85 wholesale_small = models.FloatField(
86 default=0, verbose_name=_('wholesale_small'))
87 wholesale_medium = models.FloatField(
88 default=0, verbose_name=_('wholesale_medium'))
89 wholesale_large = models.FloatField(
90 default=0, verbose_name=_('wholesale_large'))
91
92 def get_absolute_url(self):
93 return reverse('product', args=(self.vendor_code,))
94
95 @property
96 def average_rate(self):
97 """Return rounded to first decimal averaged rating."""
98 rating = self.product_feedbacks.aggregate(
99 avg=models.Avg('rating')).get('avg', 0)
100 return round(rating, 1)
101
102 @property
103 def feedback_count(self):
104 return self.product_feedbacks.count()
105
106 @property
107 def feedback(self):
108 return self.product_feedbacks.all().order_by('-date')
109
110 def get_params(self):
111 return Tag.objects.filter_by_products([self]).group_tags()
112
113 def get_brand_name(self) -> str:
114 brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
115 return brand.name if brand else ''
116
117
118 class ProductFeedback(models.Model):
119 product = models.ForeignKey(
120 Product, on_delete=models.CASCADE, null=True,
121 related_name='product_feedbacks'
122 )
123
124 date = models.DateTimeField(
125 auto_now=True, db_index=True, verbose_name=_('date'))
126 name = models.CharField(
127 max_length=255, db_index=True, verbose_name=_('name'))
128 rating = models.PositiveSmallIntegerField(
129 default=1, db_index=True, verbose_name=_('rating'))
130 dignities = models.TextField(
131 default='', blank=True, verbose_name=_('dignities'))
132 limitations = models.TextField(
133 default='', blank=True, verbose_name=_('limitations'))
134 general = models.TextField(
135 default='', blank=True, verbose_name=_('limitations'))
136
137
138 class ItemsEnum(enum.EnumMeta):
139 """
140 Provide dict-like `items` method.
141
142 https://docs.python.org/3/library/enum.html#enum-classes
143 """
144
145 def items(self):
146 return [(i.name, i.value) for i in self]
147
148 def __repr__(self):
149 fields = ', '.join(i.name for i in self)
150 return f"<enum '{self.__name__}: {fields}'>"
151
152
153 class PaymentOptions(enum.Enum, metaclass=ItemsEnum):
154 cash = 'Наличные'
155 cashless = 'Безналичные и денежные переводы'
156 AC = 'Банковская карта'
157 PC = 'Яндекс.Деньги'
158 GP = 'Связной (терминал)'
159 AB = 'Альфа-Клик'
160
161 @staticmethod
162 def default():
163 return PaymentOptions.cash
164
165
166 class Order(ecommerce_models.Order):
167 address = models.TextField(blank=True, default='')
168 payment_type = models.CharField(
169 max_length=255,
170 choices=PaymentOptions.items(),
171 default=PaymentOptions.default().name,
172 )
173 comment = models.TextField(blank=True, default='')
174 # total price - total purchase price
175 revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))
176
177 @property
178 def payment_type_label(self):
179 """Return label for an order's payment option."""
180 return PaymentOptions[self.payment_type].value
181
182 def set_positions(self, cart):
183 """
184 Save cart's state into Order instance.
185
186 @todo #589:60m Create Cart model.
187 See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672
188 """
189 self.revenue = cart.total_revenue()
190 self.save()
191 for id_, position in cart:
192 self.positions.create(
193 order=self,
194 product_id=id_,
195 vendor_code=position['vendor_code'],
196 name=position['name'],
197 price=position['price'],
198 quantity=position['quantity'],
199 )
200 return self
201
202
203 class CategoryPage(pages_models.ModelPage):
204 """Create proxy model for Admin."""
205
206 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
207 proxy = True
208
209 # noinspection PyTypeChecker
210 objects = pages_models.ModelPage.create_model_page_managers(Category)
211
212
213 class ProductPage(pages_models.ModelPage):
214 """Create proxy model for Admin."""
215
216 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
217 proxy = True
218
219 # noinspection PyTypeChecker
220 objects = (
221 pages_models.ModelPage
222 .create_model_page_managers(Product)
223 )
224
225
226 class TagGroup(catalog_models.TagGroup):
227 pass
228
229
230 class TagQuerySet(catalog_models.TagQuerySet):
231 pass
232
233
234 class Tag(catalog_models.Tag):
235 group = models.ForeignKey(
236 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
237 )
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -56,7 +56,11 @@
return reverse('category', args=(self.page.slug,))
-class Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):
+class Product(
+ catalog_models.AbstractProduct,
+ catalog_models.AbstractPosition,
+ pages_models.SyncPageMixin
+):
# That's why we are needed to explicitly add objects manager here
# because of Django special managers behaviour.
|
{"golden_diff": "diff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -56,7 +56,11 @@\n return reverse('category', args=(self.page.slug,))\n \n \n-class Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n+class Product(\n+ catalog_models.AbstractProduct,\n+ catalog_models.AbstractPosition,\n+ pages_models.SyncPageMixin\n+):\n \n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n", "issue": "Adapt catalog models to the new Product-Position hierarhy. stb2\nCreated the new hierarhy at https://github.com/fidals/refarm-site/issues/261\n", "before_files": [{"content": "import enum\nimport random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return 
self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).group_tags()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\nclass ItemsEnum(enum.EnumMeta):\n \"\"\"\n Provide dict-like `items` method.\n\n https://docs.python.org/3/library/enum.html#enum-classes\n \"\"\"\n\n def items(self):\n return [(i.name, i.value) for i in self]\n\n def __repr__(self):\n fields = ', '.join(i.name for i in self)\n return f\"<enum '{self.__name__}: {fields}'>\"\n\n\nclass PaymentOptions(enum.Enum, metaclass=ItemsEnum):\n cash = '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'\n cashless = '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'\n AC = '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'\n PC = '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'\n GP = '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'\n AB = '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'\n\n @staticmethod\n def default():\n return PaymentOptions.cash\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=PaymentOptions.items(),\n default=PaymentOptions.default().name,\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_label(self):\n \"\"\"Return label for an order's payment option.\"\"\"\n return PaymentOptions[self.payment_type].value\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class 
Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n", "path": "shopelectro/models.py"}], "after_files": [{"content": "import enum\nimport random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(\n catalog_models.AbstractProduct,\n catalog_models.AbstractPosition,\n pages_models.SyncPageMixin\n):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return 
Tag.objects.filter_by_products([self]).group_tags()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\nclass ItemsEnum(enum.EnumMeta):\n \"\"\"\n Provide dict-like `items` method.\n\n https://docs.python.org/3/library/enum.html#enum-classes\n \"\"\"\n\n def items(self):\n return [(i.name, i.value) for i in self]\n\n def __repr__(self):\n fields = ', '.join(i.name for i in self)\n return f\"<enum '{self.__name__}: {fields}'>\"\n\n\nclass PaymentOptions(enum.Enum, metaclass=ItemsEnum):\n cash = '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'\n cashless = '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'\n AC = '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'\n PC = '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'\n GP = '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'\n AB = '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'\n\n @staticmethod\n def default():\n return PaymentOptions.cash\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=PaymentOptions.items(),\n default=PaymentOptions.default().name,\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_label(self):\n \"\"\"Return label for an order's payment option.\"\"\"\n return PaymentOptions[self.payment_type].value\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n 
.create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n", "path": "shopelectro/models.py"}]}
| 2,469 | 129 |
gh_patches_debug_45119
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1144
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E1029 - False positive with IAM policy Redshift condition keys
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.24.1
*Description of issue.*
${redshift:DbUser} triggers:
`E1029:Found an embedded parameter outside of an "Fn::Sub"`
I'm defining a policy document similar to the one below. cfn-lint returns an E1029 on each line where the ${redshift:DbUser} condition key is used. Source: https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources
Sample:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "GetClusterCredsStatement",
"Effect": "Allow",
"Action": [
"redshift:GetClusterCredentials"
],
"Resource": [
"arn:aws:redshift:us-west-2:123456789012:dbuser:examplecluster/${redshift:DbUser}",
"arn:aws:redshift:us-west-2:123456789012:dbname:examplecluster/testdb",
"arn:aws:redshift:us-west-2:123456789012:dbgroup:examplecluster/common_group"
],
"Condition": {
"StringEquals": {
"aws:userid":"AIDIODR4TAW7CSEXAMPLE:${redshift:DbUser}@yourdomain.com"
}
}
},
{
"Sid": "CreateClusterUserStatement",
"Effect": "Allow",
"Action": [
"redshift:CreateClusterUser"
],
"Resource": [
"arn:aws:redshift:us-west-2:123456789012:dbuser:examplecluster/${redshift:DbUser}"
],
"Condition": {
"StringEquals": {
"aws:userid":"AIDIODR4TAW7CSEXAMPLE:${redshift:DbUser}@yourdomain.com"
}
}
},
{
"Sid": "RedshiftJoinGroupStatement",
"Effect": "Allow",
"Action": [
"redshift:JoinGroup"
],
"Resource": [
"arn:aws:redshift:us-west-2:123456789012:dbgroup:examplecluster/common_group"
]
}
]
}
```
Resource string from actual template:
`{ "Fn::Join" : [ "", [ "arn:aws:redshift:us-west-2:", { "Ref" : "AWS::AccountId" }, ":dbuser:", { "Fn::FindInMap" : [ "Environment", { "Ref" : "EnvironmentType" }, "RSClusterName" ] }, "/${redshift:DBUser}" ] ] }`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNeeded.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 from cfnlint.rules import CloudFormationLintRule
19 from cfnlint.rules import RuleMatch
20
21 class SubNeeded(CloudFormationLintRule):
22 """Check if a substitution string exists without a substitution function"""
23 id = 'E1029'
24 shortdesc = 'Sub is required if a variable is used in a string'
25 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
26 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
27 tags = ['functions', 'sub']
28
29 # Free-form text properties to exclude from this rule
30 # content is part of AWS::CloudFormation::Init
31 excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init', 'CloudWatchAlarmDefinition', 'TopicRulePayload']
32 api_excludes = ['Uri', 'Body']
33
34 # IAM Policy has special variables that don't require !Sub, Check for these
35 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
36 # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
37 # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
38 # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
39 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
40 resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}', '${aws:TokenIssueTime}', '${aws:principaltype}',
41 '${aws:SecureTransport}', '${aws:SourceIp}', '${aws:UserAgent}', '${aws:userid}',
42 '${aws:username}', '${ec2:SourceInstanceARN}',
43 '${iot:Connection.Thing.ThingName}', '${iot:Connection.Thing.ThingTypeName}',
44 '${iot:Connection.Thing.IsAttached}', '${iot:ClientId}', '${transfer:HomeBucket}',
45 '${transfer:HomeDirectory}', '${transfer:HomeFolder}', '${transfer:UserName}',
46 '${cognito-identity.amazonaws.com:aud}', '${cognito-identity.amazonaws.com:sub}', '${cognito-identity.amazonaws.com:amr}']
47
48 def _match_values(self, searchRegex, cfnelem, path):
49 """Recursively search for values matching the searchRegex"""
50 values = []
51 if isinstance(cfnelem, dict):
52 for key in cfnelem:
53 pathprop = path[:]
54 pathprop.append(key)
55 values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))
56 elif isinstance(cfnelem, list):
57 for index, item in enumerate(cfnelem):
58 pathprop = path[:]
59 pathprop.append(index)
60 values.extend(self._match_values(searchRegex, item, pathprop))
61 else:
62 # Leaf node
63 if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):
64 # Get all variables as seperate paths
65 regex = re.compile(r'(\$\{.*?\.?.*?})')
66 for variable in re.findall(regex, cfnelem):
67 values.append(path + [variable])
68
69 return values
70
71 def match_values(self, searchRegex, cfn):
72 """
73 Search for values in all parts of the templates that match the searchRegex
74 """
75 results = []
76 results.extend(self._match_values(searchRegex, cfn.template, []))
77 # Globals are removed during a transform. They need to be checked manually
78 results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))
79 return results
80
81 def _api_exceptions(self, value):
82 """ Key value exceptions """
83 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
84 return re.match(parameter_search, value)
85
86 def match(self, cfn):
87 """Basic Rule Matching"""
88
89 matches = []
90
91 # Generic regex to match a string containing at least one ${parameter}
92 parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
93
94 # Get a list of paths to every leaf node string containing at least one ${parameter}
95 parameter_string_paths = self.match_values(parameter_search, cfn)
96
97 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
98 for parameter_string_path in parameter_string_paths:
99
100 # Exxclude the special IAM variables
101 variable = parameter_string_path[-1]
102
103 if 'Resource' in parameter_string_path:
104 if variable in self.resource_excludes:
105 continue
106
107 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
108 if variable.startswith('${!'):
109 continue
110
111 found_sub = False
112 # Does the path contain an 'Fn::Sub'?
113 for step in parameter_string_path:
114 if step in self.api_excludes:
115 if self._api_exceptions(parameter_string_path[-1]):
116 found_sub = True
117 elif step == 'Fn::Sub' or step in self.excludes:
118 found_sub = True
119
120 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
121 if not found_sub:
122 # Remove the last item (the variable) to prevent multiple errors on 1 line errors
123 path = parameter_string_path[:-1]
124 message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format('/'.join(map(str, path)))
125 matches.append(RuleMatch(path, message))
126
127 return matches
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -18,6 +18,7 @@
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
+
class SubNeeded(CloudFormationLintRule):
"""Check if a substitution string exists without a substitution function"""
id = 'E1029'
@@ -28,7 +29,8 @@
# Free-form text properties to exclude from this rule
# content is part of AWS::CloudFormation::Init
- excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init', 'CloudWatchAlarmDefinition', 'TopicRulePayload']
+ excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
+ 'CloudWatchAlarmDefinition', 'TopicRulePayload']
api_excludes = ['Uri', 'Body']
# IAM Policy has special variables that don't require !Sub, Check for these
@@ -37,13 +39,25 @@
# https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
# https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
- resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}', '${aws:TokenIssueTime}', '${aws:principaltype}',
- '${aws:SecureTransport}', '${aws:SourceIp}', '${aws:UserAgent}', '${aws:userid}',
+ resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
+ '${aws:TokenIssueTime}', '${aws:principaltype}',
+ '${aws:SecureTransport}', '${aws:SourceIp}',
+ '${aws:UserAgent}', '${aws:userid}',
'${aws:username}', '${ec2:SourceInstanceARN}',
- '${iot:Connection.Thing.ThingName}', '${iot:Connection.Thing.ThingTypeName}',
- '${iot:Connection.Thing.IsAttached}', '${iot:ClientId}', '${transfer:HomeBucket}',
- '${transfer:HomeDirectory}', '${transfer:HomeFolder}', '${transfer:UserName}',
- '${cognito-identity.amazonaws.com:aud}', '${cognito-identity.amazonaws.com:sub}', '${cognito-identity.amazonaws.com:amr}']
+ '${iot:Connection.Thing.ThingName}',
+ '${iot:Connection.Thing.ThingTypeName}',
+ '${iot:Connection.Thing.IsAttached}',
+ '${iot:ClientId}', '${transfer:HomeBucket}',
+ '${transfer:HomeDirectory}', '${transfer:HomeFolder}',
+ '${transfer:UserName}', '${redshift:DbUser}',
+ '${cognito-identity.amazonaws.com:aud}',
+ '${cognito-identity.amazonaws.com:sub}',
+ '${cognito-identity.amazonaws.com:amr}']
+
+ # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
+ condition_excludes = [
+ '${redshift:DbUser}',
+ ]
def _match_values(self, searchRegex, cfnelem, path):
"""Recursively search for values matching the searchRegex"""
@@ -96,13 +110,15 @@
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
for parameter_string_path in parameter_string_paths:
-
# Exxclude the special IAM variables
variable = parameter_string_path[-1]
if 'Resource' in parameter_string_path:
if variable in self.resource_excludes:
continue
+ if 'Condition' in parameter_string_path:
+ if variable in self.condition_excludes:
+ continue
# Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
if variable.startswith('${!'):
@@ -121,7 +137,8 @@
if not found_sub:
# Remove the last item (the variable) to prevent multiple errors on 1 line errors
path = parameter_string_path[:-1]
- message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format('/'.join(map(str, path)))
+ message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format(
+ '/'.join(map(str, path)))
matches.append(RuleMatch(path, message))
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -18,6 +18,7 @@\n from cfnlint.rules import CloudFormationLintRule\n from cfnlint.rules import RuleMatch\n \n+\n class SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n@@ -28,7 +29,8 @@\n \n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n- excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init', 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n+ excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n+ 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n api_excludes = ['Uri', 'Body']\n \n # IAM Policy has special variables that don't require !Sub, Check for these\n@@ -37,13 +39,25 @@\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n- resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}', '${aws:TokenIssueTime}', '${aws:principaltype}',\n- '${aws:SecureTransport}', '${aws:SourceIp}', '${aws:UserAgent}', '${aws:userid}',\n+ resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n+ '${aws:TokenIssueTime}', '${aws:principaltype}',\n+ '${aws:SecureTransport}', '${aws:SourceIp}',\n+ '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n- '${iot:Connection.Thing.ThingName}', '${iot:Connection.Thing.ThingTypeName}',\n- '${iot:Connection.Thing.IsAttached}', '${iot:ClientId}', '${transfer:HomeBucket}',\n- '${transfer:HomeDirectory}', '${transfer:HomeFolder}', '${transfer:UserName}',\n- '${cognito-identity.amazonaws.com:aud}', '${cognito-identity.amazonaws.com:sub}', '${cognito-identity.amazonaws.com:amr}']\n+ '${iot:Connection.Thing.ThingName}',\n+ '${iot:Connection.Thing.ThingTypeName}',\n+ '${iot:Connection.Thing.IsAttached}',\n+ '${iot:ClientId}', '${transfer:HomeBucket}',\n+ '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n+ '${transfer:UserName}', '${redshift:DbUser}',\n+ '${cognito-identity.amazonaws.com:aud}',\n+ '${cognito-identity.amazonaws.com:sub}',\n+ '${cognito-identity.amazonaws.com:amr}']\n+\n+ # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n+ condition_excludes = [\n+ '${redshift:DbUser}',\n+ ]\n \n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n@@ -96,13 +110,15 @@\n \n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n-\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n \n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n+ if 'Condition' in parameter_string_path:\n+ if variable in self.condition_excludes:\n+ continue\n \n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n@@ -121,7 +137,8 @@\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = 
parameter_string_path[:-1]\n- message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format('/'.join(map(str, path)))\n+ message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format(\n+ '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n \n return matches\n", "issue": "E1029 - False positive with IAM policy Redshift condition keys\n*cfn-lint version: (`cfn-lint --version`)*\r\ncfn-lint 0.24.1\r\n\r\n*Description of issue.*\r\n${redshift:DbUser} triggers:\r\n`E1029:Found an embedded parameter outside of an \"Fn::Sub\"`\r\n\r\nI'm defining a policy document similar to the one below. cfn-lint returns a E1029 on each line where the ${redshift:DbUser} condition key is used. Source: [https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources](url)\r\n\r\nSample:\r\n```json\r\n{\r\n\"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"GetClusterCredsStatement\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [\r\n \"redshift:GetClusterCredentials\"\r\n ],\r\n \"Resource\": [\r\n \"arn:aws:redshift:us-west-2:123456789012:dbuser:examplecluster/${redshift:DbUser}\",\r\n \"arn:aws:redshift:us-west-2:123456789012:dbname:examplecluster/testdb\",\r\n \"arn:aws:redshift:us-west-2:123456789012:dbgroup:examplecluster/common_group\"\r\n ],\r\n \"Condition\": {\r\n \"StringEquals\": {\r\n \"aws:userid\":\"AIDIODR4TAW7CSEXAMPLE:${redshift:DbUser}@yourdomain.com\"\r\n }\r\n }\r\n },\r\n {\r\n \"Sid\": \"CreateClusterUserStatement\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [\r\n \"redshift:CreateClusterUser\"\r\n ],\r\n \"Resource\": [\r\n \"arn:aws:redshift:us-west-2:123456789012:dbuser:examplecluster/${redshift:DbUser}\"\r\n ],\r\n \"Condition\": {\r\n \"StringEquals\": {\r\n \"aws:userid\":\"AIDIODR4TAW7CSEXAMPLE:${redshift:DbUser}@yourdomain.com\"\r\n }\r\n }\r\n },\r\n {\r\n \"Sid\": \"RedshiftJoinGroupStatement\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [\r\n \"redshift:JoinGroup\"\r\n ],\r\n \"Resource\": [\r\n \"arn:aws:redshift:us-west-2:123456789012:dbgroup:examplecluster/common_group\"\r\n ]\r\n }\r\n ]\r\n}\r\n```\r\nResource string from actual template:\r\n`{ \"Fn::Join\" : [ \"\", [ \"arn:aws:redshift:us-west-2:\", { \"Ref\" : \"AWS::AccountId\" }, \":dbuser:\", { \"Fn::FindInMap\" : [ \"Environment\", { \"Ref\" : \"EnvironmentType\" }, \"RSClusterName\" ] }, \"/${redshift:DBUser}\" ] ] }`\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init', 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n api_excludes = ['Uri', 'Body']\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}', '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}', '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}', '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}', '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}', '${transfer:UserName}',\n '${cognito-identity.amazonaws.com:aud}', '${cognito-identity.amazonaws.com:sub}', '${cognito-identity.amazonaws.com:amr}']\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n # Get all variables as seperate paths\n regex = re.compile(r'(\\$\\{.*?\\.?.*?})')\n for variable in re.findall(regex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, []))\n # Globals are removed during a transform. 
They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^.*(\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload']\n api_excludes = ['Uri', 'Body']\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n # Get all variables as seperate paths\n regex = re.compile(r'(\\$\\{.*?\\.?.*?})')\n for variable in re.findall(regex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, []))\n # Globals are removed during a transform. 
They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^.*(\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n # Exxclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format(\n '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]}
| 2,645 | 1,018 |
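As a hedged editorial aside (not part of the dataset record above), the sketch below restates the fix captured in the preceding row's verification_info: IAM condition keys such as `${redshift:DbUser}` are excluded from rule E1029 when the variable sits under a `Condition` path. The `condition_excludes` list and the `'Condition' in path` check mirror that golden diff; the helper function and sample path are hypothetical.

```python
# Hypothetical stand-alone illustration of the check added to SubNeeded.match().
condition_excludes = ["${redshift:DbUser}"]  # taken from the golden diff


def skips_e1029(path, variable):
    # Mirrors: if 'Condition' in parameter_string_path and variable in condition_excludes
    return "Condition" in path and variable in condition_excludes


print(skips_e1029(["Resources", "Policy", "Condition", "StringEquals", "aws:userid"],
                  "${redshift:DbUser}"))  # True -> no false positive reported
```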
gh_patches_debug_41207 | rasdani/github-patches | git_diff | svthalia__concrexit-2399 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add sales order API
### Describe the solution you'd like
- An api endpoint `api/v2/sales/order/<uuid>/` that returns the order information that is shown on the website when you pay for the url from a qr code.
- It should be possible to pay the order through `api/v2/payments/sales/order/<uuid>`. This might already be possible, I haven't checked.
### Motivation
This way people will be able to pay from the app, instead of through the website where they may even need to log in. I think this is an obvious case where the ease of use of an app is very welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/sales/api/v2/views.py`
Content:
```
1 from django.db.models import Q
2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
3 from rest_framework.exceptions import PermissionDenied
4 from rest_framework.generics import (
5 ListAPIView,
6 RetrieveAPIView,
7 CreateAPIView,
8 UpdateAPIView,
9 DestroyAPIView,
10 )
11 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
12
13 from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer
14 from sales.api.v2.admin.views import (
15 OrderListView,
16 OrderDetailView,
17 ShiftDetailView,
18 ShiftListView,
19 )
20 from sales.api.v2.serializers.user_order import UserOrderSerializer
21 from sales.api.v2.serializers.user_shift import UserShiftSerializer
22 from sales.models.shift import SelfOrderPeriod, Shift
23 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
24
25
26 class UserShiftListView(ShiftListView):
27 serializer_class = UserShiftSerializer
28 # queryset = SelfOrderPeriod.objects.all()
29 permission_classes = [
30 IsAuthenticatedOrTokenHasScope,
31 DjangoModelPermissionsOrAnonReadOnly,
32 ]
33 required_scopes = ["sales:read"]
34
35
36 class UserShiftDetailView(ShiftDetailView):
37 serializer_class = UserShiftSerializer
38 # queryset = SelfOrderPeriod.objects.all()
39 permission_classes = [
40 IsAuthenticatedOrTokenHasScope,
41 DjangoModelPermissionsOrAnonReadOnly,
42 ]
43 required_scopes = ["sales:read"]
44
45
46 class UserOrderListView(OrderListView):
47 permission_classes = [
48 IsAuthenticatedOrTokenHasScopeForMethod,
49 ]
50 required_scopes_per_method = {
51 "GET": ["sales:read"],
52 "POST": ["sales:order"],
53 }
54 method_serializer_classes = {
55 ("GET",): OrderListSerializer,
56 ("POST",): UserOrderSerializer,
57 }
58
59 def create(self, request, *args, **kwargs):
60 shift = Shift.objects.get(pk=kwargs["pk"])
61 if not shift.user_orders_allowed:
62 raise PermissionDenied
63 return super(UserOrderListView, self).create(request, *args, **kwargs)
64
65 def perform_create(self, serializer):
66 serializer.save(
67 payer_id=self.request.member.pk, created_by_id=self.request.member.pk
68 )
69
70 def get_queryset(self):
71 queryset = super(UserOrderListView, self).get_queryset()
72 return queryset.filter(
73 Q(payer=self.request.member) | Q(created_by=self.request.member)
74 )
75
76
77 class UserOrderDetailView(OrderDetailView):
78 serializer_class = UserOrderSerializer
79 permission_classes = [
80 IsAuthenticatedOrTokenHasScopeForMethod,
81 ]
82 required_scopes_per_method = {
83 "GET": ["sales:read"],
84 "PATCH": ["sales:order"],
85 "PUT": ["sales:order"],
86 "DELETE": ["sales:order"],
87 }
88
89 def get_queryset(self):
90 queryset = super(UserOrderDetailView, self).get_queryset()
91 return queryset.filter(
92 Q(payer=self.request.member) | Q(created_by=self.request.member)
93 )
94
95 def update(self, request, *args, **kwargs):
96 if not self.get_object().shift.user_orders_allowed:
97 raise PermissionDenied
98 if self.get_object().payment:
99 raise PermissionDenied
100 return super(UserOrderDetailView, self).update(request, *args, **kwargs)
101
102 def partial_update(self, request, *args, **kwargs):
103 if not self.get_object().shift.user_orders_allowed:
104 raise PermissionDenied
105 if self.get_object().payment:
106 raise PermissionDenied
107 return super(UserOrderDetailView, self).partial_update(request, *args, **kwargs)
108
109 def destroy(self, request, *args, **kwargs):
110 if not self.get_object().shift.user_orders_allowed:
111 raise PermissionDenied
112 if self.get_object().payment:
113 raise PermissionDenied
114 return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)
115
```
Path: `website/sales/api/v2/urls.py`
Content:
```
1 from django.urls import path
2
3 from sales.api.v2.views import (
4 UserShiftListView,
5 UserShiftDetailView,
6 UserOrderListView,
7 UserOrderDetailView,
8 )
9
10 app_name = "sales"
11
12 urlpatterns = [
13 path("sales/shifts/", UserShiftListView.as_view(), name="user-shift-list"),
14 path(
15 "sales/shifts/<int:pk>/",
16 UserShiftDetailView.as_view(),
17 name="user-shift-detail",
18 ),
19 path(
20 "sales/shifts/<int:pk>/orders/",
21 UserOrderListView.as_view(),
22 name="user-order-list",
23 ),
24 path(
25 "sales/orders/<uuid:pk>/",
26 UserOrderDetailView.as_view(),
27 name="user-order-detail",
28 ),
29 ]
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/sales/api/v2/urls.py b/website/sales/api/v2/urls.py
--- a/website/sales/api/v2/urls.py
+++ b/website/sales/api/v2/urls.py
@@ -1,6 +1,7 @@
from django.urls import path
from sales.api.v2.views import (
+ OrderClaimView,
UserShiftListView,
UserShiftDetailView,
UserOrderListView,
@@ -10,6 +11,7 @@
app_name = "sales"
urlpatterns = [
+ path("sales/order/<uuid:pk>/claim/", OrderClaimView.as_view(), name="order-claim"),
path("sales/shifts/", UserShiftListView.as_view(), name="user-shift-list"),
path(
"sales/shifts/<int:pk>/",
diff --git a/website/sales/api/v2/views.py b/website/sales/api/v2/views.py
--- a/website/sales/api/v2/views.py
+++ b/website/sales/api/v2/views.py
@@ -1,25 +1,23 @@
from django.db.models import Q
from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
from rest_framework.exceptions import PermissionDenied
-from rest_framework.generics import (
- ListAPIView,
- RetrieveAPIView,
- CreateAPIView,
- UpdateAPIView,
- DestroyAPIView,
-)
+from rest_framework.generics import GenericAPIView
from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
+from rest_framework.response import Response
+from rest_framework.schemas.openapi import AutoSchema
-from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer
+from sales.api.v2.admin.serializers.order import OrderListSerializer
from sales.api.v2.admin.views import (
- OrderListView,
OrderDetailView,
+ OrderListView,
ShiftDetailView,
ShiftListView,
)
+from sales import services
from sales.api.v2.serializers.user_order import UserOrderSerializer
from sales.api.v2.serializers.user_shift import UserShiftSerializer
-from sales.models.shift import SelfOrderPeriod, Shift
+from sales.models.shift import Shift
+from sales.models.order import Order
from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
@@ -111,4 +109,40 @@
raise PermissionDenied
if self.get_object().payment:
raise PermissionDenied
- return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)
+
+
+class OrderClaimView(GenericAPIView):
+ """Claims an order to be paid by the current user."""
+
+ class OrderClaimViewSchema(AutoSchema):
+ def get_request_serializer(self, path, method):
+ # This endpoint does not expect any content in the request body.
+ return None
+
+ queryset = Order.objects.all()
+ serializer_class = UserOrderSerializer
+ schema = OrderClaimViewSchema(operation_id_base="claimOrder")
+ permission_classes = [IsAuthenticatedOrTokenHasScope]
+ required_scopes = ["sales:order"]
+
+ def patch(self, request, *args, **kwargs):
+ if request.member is None:
+ raise PermissionDenied("You need to be a member to pay for an order.")
+
+ order = self.get_object()
+ if order.payment:
+ raise PermissionDenied(detail="This order was already paid for.")
+
+ if order.payer is not None and order.payer != request.member:
+ raise PermissionDenied(detail="This order is not yours.")
+
+ order.payer = request.member
+ order.save()
+
+ if order.age_restricted and not services.is_adult(request.member):
+ raise PermissionDenied(
+ "The age restrictions on this order do not allow you to pay for this order."
+ )
+
+ serializer = self.get_serializer(order)
+ return Response(serializer.data)
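As a hedged editorial aside (not part of the dataset record), the sketch below shows how a client such as the mobile app might call the claim endpoint introduced in this diff. The PATCH method, the `sales/order/<uuid>/claim/` path, and the `sales:order` OAuth scope come from the diff above; the host, the `/api/v2/` prefix, and the bearer token are assumptions.

```python
# Hypothetical client-side sketch for the new order-claim endpoint.
import requests

ORDER_UUID = "00000000-0000-0000-0000-000000000000"  # placeholder order id
url = f"https://example.com/api/v2/sales/order/{ORDER_UUID}/claim/"

resp = requests.patch(
    url,
    headers={"Authorization": "Bearer <token with sales:order scope>"},
)
resp.raise_for_status()
print(resp.json())  # serialized order, now claimed by the requesting member
```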
|
{"golden_diff": "diff --git a/website/sales/api/v2/urls.py b/website/sales/api/v2/urls.py\n--- a/website/sales/api/v2/urls.py\n+++ b/website/sales/api/v2/urls.py\n@@ -1,6 +1,7 @@\n from django.urls import path\n \n from sales.api.v2.views import (\n+ OrderClaimView,\n UserShiftListView,\n UserShiftDetailView,\n UserOrderListView,\n@@ -10,6 +11,7 @@\n app_name = \"sales\"\n \n urlpatterns = [\n+ path(\"sales/order/<uuid:pk>/claim/\", OrderClaimView.as_view(), name=\"order-claim\"),\n path(\"sales/shifts/\", UserShiftListView.as_view(), name=\"user-shift-list\"),\n path(\n \"sales/shifts/<int:pk>/\",\ndiff --git a/website/sales/api/v2/views.py b/website/sales/api/v2/views.py\n--- a/website/sales/api/v2/views.py\n+++ b/website/sales/api/v2/views.py\n@@ -1,25 +1,23 @@\n from django.db.models import Q\n from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\n from rest_framework.exceptions import PermissionDenied\n-from rest_framework.generics import (\n- ListAPIView,\n- RetrieveAPIView,\n- CreateAPIView,\n- UpdateAPIView,\n- DestroyAPIView,\n-)\n+from rest_framework.generics import GenericAPIView\n from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n+from rest_framework.response import Response\n+from rest_framework.schemas.openapi import AutoSchema\n \n-from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer\n+from sales.api.v2.admin.serializers.order import OrderListSerializer\n from sales.api.v2.admin.views import (\n- OrderListView,\n OrderDetailView,\n+ OrderListView,\n ShiftDetailView,\n ShiftListView,\n )\n+from sales import services\n from sales.api.v2.serializers.user_order import UserOrderSerializer\n from sales.api.v2.serializers.user_shift import UserShiftSerializer\n-from sales.models.shift import SelfOrderPeriod, Shift\n+from sales.models.shift import Shift\n+from sales.models.order import Order\n from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n \n \n@@ -111,4 +109,40 @@\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n- return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)\n+\n+\n+class OrderClaimView(GenericAPIView):\n+ \"\"\"Claims an order to be paid by the current user.\"\"\"\n+\n+ class OrderClaimViewSchema(AutoSchema):\n+ def get_request_serializer(self, path, method):\n+ # This endpoint does not expect any content in the request body.\n+ return None\n+\n+ queryset = Order.objects.all()\n+ serializer_class = UserOrderSerializer\n+ schema = OrderClaimViewSchema(operation_id_base=\"claimOrder\")\n+ permission_classes = [IsAuthenticatedOrTokenHasScope]\n+ required_scopes = [\"sales:order\"]\n+\n+ def patch(self, request, *args, **kwargs):\n+ if request.member is None:\n+ raise PermissionDenied(\"You need to be a member to pay for an order.\")\n+\n+ order = self.get_object()\n+ if order.payment:\n+ raise PermissionDenied(detail=\"This order was already paid for.\")\n+\n+ if order.payer is not None and order.payer != request.member:\n+ raise PermissionDenied(detail=\"This order is not yours.\")\n+\n+ order.payer = request.member\n+ order.save()\n+\n+ if order.age_restricted and not services.is_adult(request.member):\n+ raise PermissionDenied(\n+ \"The age restrictions on this order do not allow you to pay for this order.\"\n+ )\n+\n+ serializer = self.get_serializer(order)\n+ return Response(serializer.data)\n", "issue": "Add sales order API\n### Describe the solution you'd like\r\n- An api endpoint 
`api/v2/sales/order/<uuid>/` that returns the order information that is shown on the website when you pay for the url from a qr code.\r\n- It should be possible to pay the order through `api/v2/payments/sales/order/<uuid>`. This might already be possible, I haven't checked.\r\n\r\n### Motivation\r\nThis way people will be able to pay from the app, instead of through the website where they may even need to log in. I think this is an obvious case where the ease of use of an app is very welcome.\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Q\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n CreateAPIView,\n UpdateAPIView,\n DestroyAPIView,\n)\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n\nfrom sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer\nfrom sales.api.v2.admin.views import (\n OrderListView,\n OrderDetailView,\n ShiftDetailView,\n ShiftListView,\n)\nfrom sales.api.v2.serializers.user_order import UserOrderSerializer\nfrom sales.api.v2.serializers.user_shift import UserShiftSerializer\nfrom sales.models.shift import SelfOrderPeriod, Shift\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass UserShiftListView(ShiftListView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserShiftDetailView(ShiftDetailView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserOrderListView(OrderListView):\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"POST\": [\"sales:order\"],\n }\n method_serializer_classes = {\n (\"GET\",): OrderListSerializer,\n (\"POST\",): UserOrderSerializer,\n }\n\n def create(self, request, *args, **kwargs):\n shift = Shift.objects.get(pk=kwargs[\"pk\"])\n if not shift.user_orders_allowed:\n raise PermissionDenied\n return super(UserOrderListView, self).create(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n serializer.save(\n payer_id=self.request.member.pk, created_by_id=self.request.member.pk\n )\n\n def get_queryset(self):\n queryset = super(UserOrderListView, self).get_queryset()\n return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n\nclass UserOrderDetailView(OrderDetailView):\n serializer_class = UserOrderSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"PATCH\": [\"sales:order\"],\n \"PUT\": [\"sales:order\"],\n \"DELETE\": [\"sales:order\"],\n }\n\n def get_queryset(self):\n queryset = super(UserOrderDetailView, self).get_queryset()\n return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n def update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, 
self).update(request, *args, **kwargs)\n\n def partial_update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).partial_update(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)\n", "path": "website/sales/api/v2/views.py"}, {"content": "from django.urls import path\n\nfrom sales.api.v2.views import (\n UserShiftListView,\n UserShiftDetailView,\n UserOrderListView,\n UserOrderDetailView,\n)\n\napp_name = \"sales\"\n\nurlpatterns = [\n path(\"sales/shifts/\", UserShiftListView.as_view(), name=\"user-shift-list\"),\n path(\n \"sales/shifts/<int:pk>/\",\n UserShiftDetailView.as_view(),\n name=\"user-shift-detail\",\n ),\n path(\n \"sales/shifts/<int:pk>/orders/\",\n UserOrderListView.as_view(),\n name=\"user-order-list\",\n ),\n path(\n \"sales/orders/<uuid:pk>/\",\n UserOrderDetailView.as_view(),\n name=\"user-order-detail\",\n ),\n]\n", "path": "website/sales/api/v2/urls.py"}], "after_files": [{"content": "from django.db.models import Q\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.schemas.openapi import AutoSchema\n\nfrom sales.api.v2.admin.serializers.order import OrderListSerializer\nfrom sales.api.v2.admin.views import (\n OrderDetailView,\n OrderListView,\n ShiftDetailView,\n ShiftListView,\n)\nfrom sales import services\nfrom sales.api.v2.serializers.user_order import UserOrderSerializer\nfrom sales.api.v2.serializers.user_shift import UserShiftSerializer\nfrom sales.models.shift import Shift\nfrom sales.models.order import Order\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass UserShiftListView(ShiftListView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserShiftDetailView(ShiftDetailView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserOrderListView(OrderListView):\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"POST\": [\"sales:order\"],\n }\n method_serializer_classes = {\n (\"GET\",): OrderListSerializer,\n (\"POST\",): UserOrderSerializer,\n }\n\n def create(self, request, *args, **kwargs):\n shift = Shift.objects.get(pk=kwargs[\"pk\"])\n if not shift.user_orders_allowed:\n raise PermissionDenied\n return super(UserOrderListView, self).create(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n serializer.save(\n payer_id=self.request.member.pk, created_by_id=self.request.member.pk\n )\n\n def get_queryset(self):\n queryset = super(UserOrderListView, self).get_queryset()\n 
return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n\nclass UserOrderDetailView(OrderDetailView):\n serializer_class = UserOrderSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"PATCH\": [\"sales:order\"],\n \"PUT\": [\"sales:order\"],\n \"DELETE\": [\"sales:order\"],\n }\n\n def get_queryset(self):\n queryset = super(UserOrderDetailView, self).get_queryset()\n return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n def update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).update(request, *args, **kwargs)\n\n def partial_update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).partial_update(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n\n\nclass OrderClaimView(GenericAPIView):\n \"\"\"Claims an order to be paid by the current user.\"\"\"\n\n class OrderClaimViewSchema(AutoSchema):\n def get_request_serializer(self, path, method):\n # This endpoint does not expect any content in the request body.\n return None\n\n queryset = Order.objects.all()\n serializer_class = UserOrderSerializer\n schema = OrderClaimViewSchema(operation_id_base=\"claimOrder\")\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"sales:order\"]\n\n def patch(self, request, *args, **kwargs):\n if request.member is None:\n raise PermissionDenied(\"You need to be a member to pay for an order.\")\n\n order = self.get_object()\n if order.payment:\n raise PermissionDenied(detail=\"This order was already paid for.\")\n\n if order.payer is not None and order.payer != request.member:\n raise PermissionDenied(detail=\"This order is not yours.\")\n\n order.payer = request.member\n order.save()\n\n if order.age_restricted and not services.is_adult(request.member):\n raise PermissionDenied(\n \"The age restrictions on this order do not allow you to pay for this order.\"\n )\n\n serializer = self.get_serializer(order)\n return Response(serializer.data)\n", "path": "website/sales/api/v2/views.py"}, {"content": "from django.urls import path\n\nfrom sales.api.v2.views import (\n OrderClaimView,\n UserShiftListView,\n UserShiftDetailView,\n UserOrderListView,\n UserOrderDetailView,\n)\n\napp_name = \"sales\"\n\nurlpatterns = [\n path(\"sales/order/<uuid:pk>/claim/\", OrderClaimView.as_view(), name=\"order-claim\"),\n path(\"sales/shifts/\", UserShiftListView.as_view(), name=\"user-shift-list\"),\n path(\n \"sales/shifts/<int:pk>/\",\n UserShiftDetailView.as_view(),\n name=\"user-shift-detail\",\n ),\n path(\n \"sales/shifts/<int:pk>/orders/\",\n UserOrderListView.as_view(),\n name=\"user-order-list\",\n ),\n path(\n \"sales/orders/<uuid:pk>/\",\n UserOrderDetailView.as_view(),\n name=\"user-order-detail\",\n ),\n]\n", "path": "website/sales/api/v2/urls.py"}]}
| 1,681 | 844 |
gh_patches_debug_37288 | rasdani/github-patches | git_diff | google__clusterfuzz-939 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show time executed / cpu hours instead of number of runs in fuzzer stats.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/local/butler/scripts/setup.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Initial datastore setup."""
15 from __future__ import print_function
16
17 from builtins import object
18 import six
19
20 from google.cloud import monitoring_v3
21
22 from base import utils
23 from datastore import data_types
24 from metrics import monitor
25 from metrics import monitoring_metrics
26
27 LIBFUZZER_TEMPLATE = """APP_NAME = launcher.py
28 MAX_FUZZ_THREADS = 1
29 MAX_TESTCASES = 4
30 FUZZ_TEST_TIMEOUT = 4800
31 TEST_TIMEOUT = 30
32 WARMUP_TIMEOUT = 30
33 BAD_BUILD_CHECK = False
34 THREAD_ALIVE_CHECK_INTERVAL = 1
35 REPORT_OOMS_AND_HANGS = True
36 CORPUS_FUZZER_NAME_OVERRIDE = libFuzzer
37 ENABLE_GESTURES = False
38 THREAD_DELAY = 30.0
39 """
40
41 AFL_TEMPLATE = """APP_NAME = launcher.py
42 MAX_FUZZ_THREADS = 1
43 MAX_TESTCASES = 4
44 FUZZ_TEST_TIMEOUT = 4800
45 TEST_TIMEOUT = 30
46 WARMUP_TIMEOUT = 30
47 BAD_BUILD_CHECK = False
48 THREAD_ALIVE_CHECK_INTERVAL = 1
49 CORPUS_FUZZER_NAME_OVERRIDE = libFuzzer
50 ADDITIONAL_PROCESSES_TO_KILL = afl-fuzz afl-showmap
51 ENABLE_GESTURES = False
52 THREAD_DELAY = 30.0
53 """
54
55 ENGINE_ASAN_TEMPLATE = """LSAN = True
56 ADDITIONAL_ASAN_OPTIONS = quarantine_size_mb=64:strict_memcmp=1:symbolize=0:fast_unwind_on_fatal=0:allocator_release_to_os_interval_ms=500
57 """
58
59 ENGINE_MSAN_TEMPLATE = ('ADDITIONAL_MSAN_OPTIONS = symbolize=0:print_stats=1:'
60 'allocator_release_to_os_interval_ms=500:'
61 'halt_on_error=1')
62
63 ENGINE_UBSAN_TEMPLATE = """LSAN = False
64 ADDITIONAL_UBSAN_OPTIONS = symbolize=0:allocator_release_to_os_interval_ms=500
65 """
66
67 PRUNE_TEMPLATE = 'CORPUS_PRUNE = True'
68
69 TEMPLATES = {
70 'afl': AFL_TEMPLATE,
71 'engine_asan': ENGINE_ASAN_TEMPLATE,
72 'engine_msan': ENGINE_MSAN_TEMPLATE,
73 'engine_ubsan': ENGINE_UBSAN_TEMPLATE,
74 'libfuzzer': LIBFUZZER_TEMPLATE,
75 'prune': PRUNE_TEMPLATE,
76 }
77
78
79 class BaseBuiltinFuzzerDefaults(object):
80 """Default values for a builtin Fuzzer data_type. Note this class should be
81 inherited and should not be used directly."""
82
83 def __init__(self):
84 # Set defaults for any builtin fuzzer.
85 self.revision = 1
86 self.file_size = 'builtin'
87 self.source = 'builtin'
88 self.builtin = True
89
90 # Create attributes that must be set by child classes.
91 self.name = None
92 self.stats_column_descriptions = None
93 self.stats_columns = None
94 self.key_id = None
95
96 def create_fuzzer(self):
97 """Create a Fuzzer data_type with columns set to the defaults specified by
98 this object."""
99 assert self.name is not None
100 return data_types.Fuzzer(
101 id=self.key_id,
102 revision=self.revision,
103 file_size=self.file_size,
104 source=self.source,
105 name=self.name,
106 builtin=self.builtin,
107 stats_column_descriptions=self.stats_column_descriptions,
108 stats_columns=self.stats_columns)
109
110
111 class LibFuzzerDefaults(BaseBuiltinFuzzerDefaults):
112 """Default values for libFuzzer."""
113
114 def __init__(self):
115 super(LibFuzzerDefaults, self).__init__()
116 # Override empty values from parent.
117 self.name = 'libFuzzer'
118 self.key_id = 1337
119 # Use single quotes since the string ends in a double quote.
120 # pylint: disable=line-too-long
121 self.stats_column_descriptions = '''fuzzer: "Fuzz target"
122 perf_report: "Link to performance analysis report"
123 tests_executed: "Number of testcases executed during this time period"
124 new_crashes: "Number of new unique crashes observed during this time period"
125 edge_coverage: "Coverage for this fuzz target (number of edges/total)"
126 cov_report: "Link to coverage report"
127 corpus_size: "Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)"
128 avg_exec_per_sec: "Average number of testcases executed per second"
129 fuzzing_time_percent: "Percent of expected fuzzing time that is actually spent fuzzing."
130 new_tests_added: "New testcases added to the corpus during fuzzing based on code coverage"
131 new_features: "New coverage features based on new tests added to corpus."
132 regular_crash_percent: "Percent of fuzzing runs that had regular crashes (other than ooms, leaks, timeouts, startup and bad instrumentation crashes)"
133 oom_percent: "Percent of fuzzing runs that crashed on OOMs (should be 0)"
134 leak_percent: "Percent of fuzzing runs that crashed on memory leaks (should be 0)"
135 timeout_percent: "Percent of fuzzing runs that had testcases timeout (should be 0)"
136 startup_crash_percent: "Percent of fuzzing runs that crashed on startup (should be 0)"
137 avg_unwanted_log_lines: "Average number of unwanted log lines in fuzzing runs (should be 0)"
138 runs_count: "Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)"
139 logs: "Link to fuzzing logs"
140 corpus_backup: "Backup copy of the minimized corpus generated based on code coverage"'''
141
142 self.stats_columns = """_PERFORMANCE_REPORT as perf_report,
143 sum(t.number_of_executed_units) as tests_executed,
144 custom(j.new_crashes) as new_crashes,
145 _EDGE_COV as edge_coverage,
146 _COV_REPORT as cov_report,
147 _CORPUS_SIZE as corpus_size,
148 avg(t.average_exec_per_sec) as avg_exec_per_sec,
149 avg(t.fuzzing_time_percent) as fuzzing_time_percent,
150 sum(t.new_units_added) as new_tests_added,
151 sum(t.new_features) as new_features,
152 avg(t.crash_count*100) as regular_crash_percent,
153 avg(t.oom_count*100) as oom_percent,
154 avg(t.leak_count*100) as leak_percent,
155 avg(t.timeout_count*100) as timeout_percent,
156 avg(t.startup_crash_count*100) as startup_crash_percent,
157 avg(t.log_lines_unwanted) as avg_unwanted_log_lines,
158 COUNT(t.bad_instrumentation) as runs_count,
159 _FUZZER_RUN_LOGS as logs,
160 _CORPUS_BACKUP as corpus_backup,"""
161
162
163 class AflDefaults(BaseBuiltinFuzzerDefaults):
164 """Default values for AFL."""
165
166 def __init__(self):
167 super(AflDefaults, self).__init__()
168 # Override empty values from parent.
169 self.name = 'afl'
170 self.key_id = 1338
171 # Use single quotes since the string ends in a double quote.
172 # pylint: disable=line-too-long
173 self.stats_column_descriptions = '''fuzzer: "Fuzz target"
174 new_crashes: "Number of new unique crashes observed during this time period"
175 edge_coverage: "Edge coverage for this fuzz target (number of edges / total)"
176 cov_report: "Link to coverage report"
177 corpus_size: "Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)"
178 avg_exec_per_sec: "Average number of testcases executed per second"
179 stability: "Percentage of edges that behave deterministically"
180 new_tests_added: "New testcases added to the corpus during fuzzing based on code coverage"
181 regular_crash_percent: "Percent of fuzzing runs that had regular crashes (other than startup and bad instrumentation crashes)"
182 timeout_percent: "Percent of fuzzing runs that had testcases timeout (should be 0)"
183 startup_crash_percent: "Percent of fuzzing runs that crashed on startup (should be 0)"
184 avg_unwanted_log_lines: "Average number of unwanted log lines in fuzzing runs (should be 0)"
185 runs_count: "Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)"
186 logs: "Link to fuzzing logs"
187 corpus_backup: "Backup copy of the minimized corpus generated based on code coverage"'''
188
189 self.stats_columns = """custom(j.new_crashes) as new_crashes,
190 _EDGE_COV as edge_coverage,
191 _COV_REPORT as cov_report,
192 _CORPUS_SIZE as corpus_size,
193 avg(t.average_exec_per_sec) as avg_exec_per_sec,
194 avg(t.stability) as stability,
195 sum(t.new_units_added) as new_tests_added,
196 avg(t.crash_count*100) as regular_crash_percent,
197 avg(t.timeout_count*100) as timeout_percent,
198 avg(t.startup_crash_count*100) as startup_crash_percent,
199 avg(t.log_lines_unwanted) as avg_unwanted_log_lines,
200 COUNT(t.bad_instrumentation) as runs_count,
201 _FUZZER_RUN_LOGS as logs,
202 _CORPUS_BACKUP as corpus_backup,"""
203
204
205 def setup_config(non_dry_run):
206 """Set up configuration."""
207 config = data_types.Config.query().get()
208 if not config:
209 config = data_types.Config()
210
211 if non_dry_run:
212 print('Creating config')
213 config.put()
214 else:
215 print('Skip creating config (dry-run mode)')
216
217
218 def setup_fuzzers(non_dry_run):
219 """Set up fuzzers."""
220 for fuzzer_defaults in [AflDefaults(), LibFuzzerDefaults()]:
221 fuzzer = data_types.Fuzzer.query(
222 data_types.Fuzzer.name == fuzzer_defaults.name).get()
223 if fuzzer:
224 print(fuzzer_defaults.name, 'fuzzer already exists')
225 if non_dry_run:
226 print('Updating stats metrics.')
227 fuzzer.stats_columns = fuzzer_defaults.stats_columns
228 fuzzer.stats_column_descriptions = (
229 fuzzer_defaults.stats_column_descriptions)
230 fuzzer.put()
231
232 continue
233
234 if non_dry_run:
235 print('Creating fuzzer', fuzzer_defaults.name)
236 fuzzer_defaults.create_fuzzer().put()
237 else:
238 print('Skip creating fuzzer', fuzzer_defaults.name, '(dry-run mode)')
239
240
241 def setup_templates(non_dry_run):
242 """Set up templates."""
243 for name, template in six.iteritems(TEMPLATES):
244 job = data_types.JobTemplate.query(
245 data_types.JobTemplate.name == name).get()
246 if job:
247 print('Template with name', name, 'already exists.')
248 continue
249
250 if non_dry_run:
251 print('Creating template', name)
252 data_types.JobTemplate(name=name, environment_string=template).put()
253 else:
254 print('Skip creating template', name, '(dry-run mode)')
255
256
257 def setup_metrics(non_dry_run):
258 """Set up metrics."""
259 client = monitoring_v3.MetricServiceClient()
260 project_name = utils.get_application_id()
261 project_path = client.project_path(project_name)
262
263 for name in dir(monitoring_metrics):
264 metric = getattr(monitoring_metrics, name)
265 if not isinstance(metric, monitor.Metric):
266 continue
267
268 descriptor = monitoring_v3.types.MetricDescriptor()
269 metric.monitoring_v3_metric_descriptor(descriptor)
270
271 if non_dry_run:
272 print('Creating metric', descriptor)
273 client.create_metric_descriptor(project_path, descriptor)
274 else:
275 print('Skip creating metric', descriptor, '(dry-run mode)')
276
277
278 def execute(args):
279 """Set up initial Datastore models."""
280 setup_config(args.non_dry_run)
281 setup_fuzzers(args.non_dry_run)
282 setup_templates(args.non_dry_run)
283
284 if not args.local:
285 setup_metrics(args.non_dry_run)
286
287 print('Done')
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/local/butler/scripts/setup.py b/src/local/butler/scripts/setup.py
--- a/src/local/butler/scripts/setup.py
+++ b/src/local/butler/scripts/setup.py
@@ -135,7 +135,7 @@
timeout_percent: "Percent of fuzzing runs that had testcases timeout (should be 0)"
startup_crash_percent: "Percent of fuzzing runs that crashed on startup (should be 0)"
avg_unwanted_log_lines: "Average number of unwanted log lines in fuzzing runs (should be 0)"
-runs_count: "Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)"
+total_fuzzing_time_hrs: "Total time in hours for which the fuzzer(s) ran. Will be lower if fuzzer hits a crash frequently."
logs: "Link to fuzzing logs"
corpus_backup: "Backup copy of the minimized corpus generated based on code coverage"'''
@@ -155,7 +155,7 @@
avg(t.timeout_count*100) as timeout_percent,
avg(t.startup_crash_count*100) as startup_crash_percent,
avg(t.log_lines_unwanted) as avg_unwanted_log_lines,
-COUNT(t.bad_instrumentation) as runs_count,
+sum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,
_FUZZER_RUN_LOGS as logs,
_CORPUS_BACKUP as corpus_backup,"""
@@ -182,7 +182,7 @@
timeout_percent: "Percent of fuzzing runs that had testcases timeout (should be 0)"
startup_crash_percent: "Percent of fuzzing runs that crashed on startup (should be 0)"
avg_unwanted_log_lines: "Average number of unwanted log lines in fuzzing runs (should be 0)"
-runs_count: "Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)"
+total_fuzzing_time_hrs: "Total time in hours for which the fuzzer(s) ran. Will be lower if fuzzer hits a crash frequently."
logs: "Link to fuzzing logs"
corpus_backup: "Backup copy of the minimized corpus generated based on code coverage"'''
@@ -197,7 +197,7 @@
avg(t.timeout_count*100) as timeout_percent,
avg(t.startup_crash_count*100) as startup_crash_percent,
avg(t.log_lines_unwanted) as avg_unwanted_log_lines,
-COUNT(t.bad_instrumentation) as runs_count,
+sum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,
_FUZZER_RUN_LOGS as logs,
_CORPUS_BACKUP as corpus_backup,"""
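As a hedged editorial aside (not part of the dataset record), the snippet below illustrates what the new `total_fuzzing_time_hrs` column reports: per-run `actual_duration` values (seconds) summed and converted to hours, replacing the raw run count. The `/3600.0` conversion mirrors the diff above; the sample run data is hypothetical.

```python
# Hypothetical illustration of the sum(t.actual_duration/3600.0) aggregation.
runs = [{"actual_duration": 3600}, {"actual_duration": 1800}]  # seconds per run

total_fuzzing_time_hrs = sum(r["actual_duration"] / 3600.0 for r in runs)
print(total_fuzzing_time_hrs)  # 1.5 -> CPU hours reported instead of "2 runs"
```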
|
{"golden_diff": "diff --git a/src/local/butler/scripts/setup.py b/src/local/butler/scripts/setup.py\n--- a/src/local/butler/scripts/setup.py\n+++ b/src/local/butler/scripts/setup.py\n@@ -135,7 +135,7 @@\n timeout_percent: \"Percent of fuzzing runs that had testcases timeout (should be 0)\"\n startup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\n avg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\n-runs_count: \"Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)\"\n+total_fuzzing_time_hrs: \"Total time in hours for which the fuzzer(s) ran. Will be lower if fuzzer hits a crash frequently.\"\n logs: \"Link to fuzzing logs\"\n corpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n \n@@ -155,7 +155,7 @@\n avg(t.timeout_count*100) as timeout_percent,\n avg(t.startup_crash_count*100) as startup_crash_percent,\n avg(t.log_lines_unwanted) as avg_unwanted_log_lines,\n-COUNT(t.bad_instrumentation) as runs_count,\n+sum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,\n _FUZZER_RUN_LOGS as logs,\n _CORPUS_BACKUP as corpus_backup,\"\"\"\n \n@@ -182,7 +182,7 @@\n timeout_percent: \"Percent of fuzzing runs that had testcases timeout (should be 0)\"\n startup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\n avg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\n-runs_count: \"Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)\"\n+total_fuzzing_time_hrs: \"Total time in hours for which the fuzzer(s) ran. Will be lower if fuzzer hits a crash frequently.\"\n logs: \"Link to fuzzing logs\"\n corpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n \n@@ -197,7 +197,7 @@\n avg(t.timeout_count*100) as timeout_percent,\n avg(t.startup_crash_count*100) as startup_crash_percent,\n avg(t.log_lines_unwanted) as avg_unwanted_log_lines,\n-COUNT(t.bad_instrumentation) as runs_count,\n+sum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,\n _FUZZER_RUN_LOGS as logs,\n _CORPUS_BACKUP as corpus_backup,\"\"\"\n", "issue": "Show time executed / cpu hours instead of number of runs in fuzzer stats.\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Initial datastore setup.\"\"\"\nfrom __future__ import print_function\n\nfrom builtins import object\nimport six\n\nfrom google.cloud import monitoring_v3\n\nfrom base import utils\nfrom datastore import data_types\nfrom metrics import monitor\nfrom metrics import monitoring_metrics\n\nLIBFUZZER_TEMPLATE = \"\"\"APP_NAME = launcher.py\nMAX_FUZZ_THREADS = 1\nMAX_TESTCASES = 4\nFUZZ_TEST_TIMEOUT = 4800\nTEST_TIMEOUT = 30\nWARMUP_TIMEOUT = 30\nBAD_BUILD_CHECK = False\nTHREAD_ALIVE_CHECK_INTERVAL = 1\nREPORT_OOMS_AND_HANGS = True\nCORPUS_FUZZER_NAME_OVERRIDE = libFuzzer\nENABLE_GESTURES = False\nTHREAD_DELAY = 30.0\n\"\"\"\n\nAFL_TEMPLATE = \"\"\"APP_NAME = 
launcher.py\nMAX_FUZZ_THREADS = 1\nMAX_TESTCASES = 4\nFUZZ_TEST_TIMEOUT = 4800\nTEST_TIMEOUT = 30\nWARMUP_TIMEOUT = 30\nBAD_BUILD_CHECK = False\nTHREAD_ALIVE_CHECK_INTERVAL = 1\nCORPUS_FUZZER_NAME_OVERRIDE = libFuzzer\nADDITIONAL_PROCESSES_TO_KILL = afl-fuzz afl-showmap\nENABLE_GESTURES = False\nTHREAD_DELAY = 30.0\n\"\"\"\n\nENGINE_ASAN_TEMPLATE = \"\"\"LSAN = True\nADDITIONAL_ASAN_OPTIONS = quarantine_size_mb=64:strict_memcmp=1:symbolize=0:fast_unwind_on_fatal=0:allocator_release_to_os_interval_ms=500\n\"\"\"\n\nENGINE_MSAN_TEMPLATE = ('ADDITIONAL_MSAN_OPTIONS = symbolize=0:print_stats=1:'\n 'allocator_release_to_os_interval_ms=500:'\n 'halt_on_error=1')\n\nENGINE_UBSAN_TEMPLATE = \"\"\"LSAN = False\nADDITIONAL_UBSAN_OPTIONS = symbolize=0:allocator_release_to_os_interval_ms=500\n\"\"\"\n\nPRUNE_TEMPLATE = 'CORPUS_PRUNE = True'\n\nTEMPLATES = {\n 'afl': AFL_TEMPLATE,\n 'engine_asan': ENGINE_ASAN_TEMPLATE,\n 'engine_msan': ENGINE_MSAN_TEMPLATE,\n 'engine_ubsan': ENGINE_UBSAN_TEMPLATE,\n 'libfuzzer': LIBFUZZER_TEMPLATE,\n 'prune': PRUNE_TEMPLATE,\n}\n\n\nclass BaseBuiltinFuzzerDefaults(object):\n \"\"\"Default values for a builtin Fuzzer data_type. Note this class should be\n inherited and should not be used directly.\"\"\"\n\n def __init__(self):\n # Set defaults for any builtin fuzzer.\n self.revision = 1\n self.file_size = 'builtin'\n self.source = 'builtin'\n self.builtin = True\n\n # Create attributes that must be set by child classes.\n self.name = None\n self.stats_column_descriptions = None\n self.stats_columns = None\n self.key_id = None\n\n def create_fuzzer(self):\n \"\"\"Create a Fuzzer data_type with columns set to the defaults specified by\n this object.\"\"\"\n assert self.name is not None\n return data_types.Fuzzer(\n id=self.key_id,\n revision=self.revision,\n file_size=self.file_size,\n source=self.source,\n name=self.name,\n builtin=self.builtin,\n stats_column_descriptions=self.stats_column_descriptions,\n stats_columns=self.stats_columns)\n\n\nclass LibFuzzerDefaults(BaseBuiltinFuzzerDefaults):\n \"\"\"Default values for libFuzzer.\"\"\"\n\n def __init__(self):\n super(LibFuzzerDefaults, self).__init__()\n # Override empty values from parent.\n self.name = 'libFuzzer'\n self.key_id = 1337\n # Use single quotes since the string ends in a double quote.\n # pylint: disable=line-too-long\n self.stats_column_descriptions = '''fuzzer: \"Fuzz target\"\nperf_report: \"Link to performance analysis report\"\ntests_executed: \"Number of testcases executed during this time period\"\nnew_crashes: \"Number of new unique crashes observed during this time period\"\nedge_coverage: \"Coverage for this fuzz target (number of edges/total)\"\ncov_report: \"Link to coverage report\"\ncorpus_size: \"Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)\"\navg_exec_per_sec: \"Average number of testcases executed per second\"\nfuzzing_time_percent: \"Percent of expected fuzzing time that is actually spent fuzzing.\"\nnew_tests_added: \"New testcases added to the corpus during fuzzing based on code coverage\"\nnew_features: \"New coverage features based on new tests added to corpus.\"\nregular_crash_percent: \"Percent of fuzzing runs that had regular crashes (other than ooms, leaks, timeouts, startup and bad instrumentation crashes)\"\noom_percent: \"Percent of fuzzing runs that crashed on OOMs (should be 0)\"\nleak_percent: \"Percent of fuzzing runs that crashed on memory leaks (should be 0)\"\ntimeout_percent: \"Percent of fuzzing runs that 
had testcases timeout (should be 0)\"\nstartup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\navg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\nruns_count: \"Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)\"\nlogs: \"Link to fuzzing logs\"\ncorpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n\n self.stats_columns = \"\"\"_PERFORMANCE_REPORT as perf_report,\nsum(t.number_of_executed_units) as tests_executed,\ncustom(j.new_crashes) as new_crashes,\n_EDGE_COV as edge_coverage,\n_COV_REPORT as cov_report,\n_CORPUS_SIZE as corpus_size,\navg(t.average_exec_per_sec) as avg_exec_per_sec,\navg(t.fuzzing_time_percent) as fuzzing_time_percent,\nsum(t.new_units_added) as new_tests_added,\nsum(t.new_features) as new_features,\navg(t.crash_count*100) as regular_crash_percent,\navg(t.oom_count*100) as oom_percent,\navg(t.leak_count*100) as leak_percent,\navg(t.timeout_count*100) as timeout_percent,\navg(t.startup_crash_count*100) as startup_crash_percent,\navg(t.log_lines_unwanted) as avg_unwanted_log_lines,\nCOUNT(t.bad_instrumentation) as runs_count,\n_FUZZER_RUN_LOGS as logs,\n_CORPUS_BACKUP as corpus_backup,\"\"\"\n\n\nclass AflDefaults(BaseBuiltinFuzzerDefaults):\n \"\"\"Default values for AFL.\"\"\"\n\n def __init__(self):\n super(AflDefaults, self).__init__()\n # Override empty values from parent.\n self.name = 'afl'\n self.key_id = 1338\n # Use single quotes since the string ends in a double quote.\n # pylint: disable=line-too-long\n self.stats_column_descriptions = '''fuzzer: \"Fuzz target\"\nnew_crashes: \"Number of new unique crashes observed during this time period\"\nedge_coverage: \"Edge coverage for this fuzz target (number of edges / total)\"\ncov_report: \"Link to coverage report\"\ncorpus_size: \"Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)\"\navg_exec_per_sec: \"Average number of testcases executed per second\"\nstability: \"Percentage of edges that behave deterministically\"\nnew_tests_added: \"New testcases added to the corpus during fuzzing based on code coverage\"\nregular_crash_percent: \"Percent of fuzzing runs that had regular crashes (other than startup and bad instrumentation crashes)\"\ntimeout_percent: \"Percent of fuzzing runs that had testcases timeout (should be 0)\"\nstartup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\navg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\nruns_count: \"Number of libFuzzer fuzzing runs or sessions (default is ~1 hr)\"\nlogs: \"Link to fuzzing logs\"\ncorpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n\n self.stats_columns = \"\"\"custom(j.new_crashes) as new_crashes,\n_EDGE_COV as edge_coverage,\n_COV_REPORT as cov_report,\n_CORPUS_SIZE as corpus_size,\navg(t.average_exec_per_sec) as avg_exec_per_sec,\navg(t.stability) as stability,\nsum(t.new_units_added) as new_tests_added,\navg(t.crash_count*100) as regular_crash_percent,\navg(t.timeout_count*100) as timeout_percent,\navg(t.startup_crash_count*100) as startup_crash_percent,\navg(t.log_lines_unwanted) as avg_unwanted_log_lines,\nCOUNT(t.bad_instrumentation) as runs_count,\n_FUZZER_RUN_LOGS as logs,\n_CORPUS_BACKUP as corpus_backup,\"\"\"\n\n\ndef setup_config(non_dry_run):\n \"\"\"Set up configuration.\"\"\"\n config = data_types.Config.query().get()\n if not config:\n 
config = data_types.Config()\n\n if non_dry_run:\n print('Creating config')\n config.put()\n else:\n print('Skip creating config (dry-run mode)')\n\n\ndef setup_fuzzers(non_dry_run):\n \"\"\"Set up fuzzers.\"\"\"\n for fuzzer_defaults in [AflDefaults(), LibFuzzerDefaults()]:\n fuzzer = data_types.Fuzzer.query(\n data_types.Fuzzer.name == fuzzer_defaults.name).get()\n if fuzzer:\n print(fuzzer_defaults.name, 'fuzzer already exists')\n if non_dry_run:\n print('Updating stats metrics.')\n fuzzer.stats_columns = fuzzer_defaults.stats_columns\n fuzzer.stats_column_descriptions = (\n fuzzer_defaults.stats_column_descriptions)\n fuzzer.put()\n\n continue\n\n if non_dry_run:\n print('Creating fuzzer', fuzzer_defaults.name)\n fuzzer_defaults.create_fuzzer().put()\n else:\n print('Skip creating fuzzer', fuzzer_defaults.name, '(dry-run mode)')\n\n\ndef setup_templates(non_dry_run):\n \"\"\"Set up templates.\"\"\"\n for name, template in six.iteritems(TEMPLATES):\n job = data_types.JobTemplate.query(\n data_types.JobTemplate.name == name).get()\n if job:\n print('Template with name', name, 'already exists.')\n continue\n\n if non_dry_run:\n print('Creating template', name)\n data_types.JobTemplate(name=name, environment_string=template).put()\n else:\n print('Skip creating template', name, '(dry-run mode)')\n\n\ndef setup_metrics(non_dry_run):\n \"\"\"Set up metrics.\"\"\"\n client = monitoring_v3.MetricServiceClient()\n project_name = utils.get_application_id()\n project_path = client.project_path(project_name)\n\n for name in dir(monitoring_metrics):\n metric = getattr(monitoring_metrics, name)\n if not isinstance(metric, monitor.Metric):\n continue\n\n descriptor = monitoring_v3.types.MetricDescriptor()\n metric.monitoring_v3_metric_descriptor(descriptor)\n\n if non_dry_run:\n print('Creating metric', descriptor)\n client.create_metric_descriptor(project_path, descriptor)\n else:\n print('Skip creating metric', descriptor, '(dry-run mode)')\n\n\ndef execute(args):\n \"\"\"Set up initial Datastore models.\"\"\"\n setup_config(args.non_dry_run)\n setup_fuzzers(args.non_dry_run)\n setup_templates(args.non_dry_run)\n\n if not args.local:\n setup_metrics(args.non_dry_run)\n\n print('Done')\n", "path": "src/local/butler/scripts/setup.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Initial datastore setup.\"\"\"\nfrom __future__ import print_function\n\nfrom builtins import object\nimport six\n\nfrom google.cloud import monitoring_v3\n\nfrom base import utils\nfrom datastore import data_types\nfrom metrics import monitor\nfrom metrics import monitoring_metrics\n\nLIBFUZZER_TEMPLATE = \"\"\"APP_NAME = launcher.py\nMAX_FUZZ_THREADS = 1\nMAX_TESTCASES = 4\nFUZZ_TEST_TIMEOUT = 4800\nTEST_TIMEOUT = 30\nWARMUP_TIMEOUT = 30\nBAD_BUILD_CHECK = False\nTHREAD_ALIVE_CHECK_INTERVAL = 1\nREPORT_OOMS_AND_HANGS = True\nCORPUS_FUZZER_NAME_OVERRIDE = libFuzzer\nENABLE_GESTURES = False\nTHREAD_DELAY = 30.0\n\"\"\"\n\nAFL_TEMPLATE = \"\"\"APP_NAME = 
launcher.py\nMAX_FUZZ_THREADS = 1\nMAX_TESTCASES = 4\nFUZZ_TEST_TIMEOUT = 4800\nTEST_TIMEOUT = 30\nWARMUP_TIMEOUT = 30\nBAD_BUILD_CHECK = False\nTHREAD_ALIVE_CHECK_INTERVAL = 1\nCORPUS_FUZZER_NAME_OVERRIDE = libFuzzer\nADDITIONAL_PROCESSES_TO_KILL = afl-fuzz afl-showmap\nENABLE_GESTURES = False\nTHREAD_DELAY = 30.0\n\"\"\"\n\nENGINE_ASAN_TEMPLATE = \"\"\"LSAN = True\nADDITIONAL_ASAN_OPTIONS = quarantine_size_mb=64:strict_memcmp=1:symbolize=0:fast_unwind_on_fatal=0:allocator_release_to_os_interval_ms=500\n\"\"\"\n\nENGINE_MSAN_TEMPLATE = ('ADDITIONAL_MSAN_OPTIONS = symbolize=0:print_stats=1:'\n 'allocator_release_to_os_interval_ms=500:'\n 'halt_on_error=1')\n\nENGINE_UBSAN_TEMPLATE = \"\"\"LSAN = False\nADDITIONAL_UBSAN_OPTIONS = symbolize=0:allocator_release_to_os_interval_ms=500\n\"\"\"\n\nPRUNE_TEMPLATE = 'CORPUS_PRUNE = True'\n\nTEMPLATES = {\n 'afl': AFL_TEMPLATE,\n 'engine_asan': ENGINE_ASAN_TEMPLATE,\n 'engine_msan': ENGINE_MSAN_TEMPLATE,\n 'engine_ubsan': ENGINE_UBSAN_TEMPLATE,\n 'libfuzzer': LIBFUZZER_TEMPLATE,\n 'prune': PRUNE_TEMPLATE,\n}\n\n\nclass BaseBuiltinFuzzerDefaults(object):\n \"\"\"Default values for a builtin Fuzzer data_type. Note this class should be\n inherited and should not be used directly.\"\"\"\n\n def __init__(self):\n # Set defaults for any builtin fuzzer.\n self.revision = 1\n self.file_size = 'builtin'\n self.source = 'builtin'\n self.builtin = True\n\n # Create attributes that must be set by child classes.\n self.name = None\n self.stats_column_descriptions = None\n self.stats_columns = None\n self.key_id = None\n\n def create_fuzzer(self):\n \"\"\"Create a Fuzzer data_type with columns set to the defaults specified by\n this object.\"\"\"\n assert self.name is not None\n return data_types.Fuzzer(\n id=self.key_id,\n revision=self.revision,\n file_size=self.file_size,\n source=self.source,\n name=self.name,\n builtin=self.builtin,\n stats_column_descriptions=self.stats_column_descriptions,\n stats_columns=self.stats_columns)\n\n\nclass LibFuzzerDefaults(BaseBuiltinFuzzerDefaults):\n \"\"\"Default values for libFuzzer.\"\"\"\n\n def __init__(self):\n super(LibFuzzerDefaults, self).__init__()\n # Override empty values from parent.\n self.name = 'libFuzzer'\n self.key_id = 1337\n # Use single quotes since the string ends in a double quote.\n # pylint: disable=line-too-long\n self.stats_column_descriptions = '''fuzzer: \"Fuzz target\"\nperf_report: \"Link to performance analysis report\"\ntests_executed: \"Number of testcases executed during this time period\"\nnew_crashes: \"Number of new unique crashes observed during this time period\"\nedge_coverage: \"Coverage for this fuzz target (number of edges/total)\"\ncov_report: \"Link to coverage report\"\ncorpus_size: \"Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)\"\navg_exec_per_sec: \"Average number of testcases executed per second\"\nfuzzing_time_percent: \"Percent of expected fuzzing time that is actually spent fuzzing.\"\nnew_tests_added: \"New testcases added to the corpus during fuzzing based on code coverage\"\nnew_features: \"New coverage features based on new tests added to corpus.\"\nregular_crash_percent: \"Percent of fuzzing runs that had regular crashes (other than ooms, leaks, timeouts, startup and bad instrumentation crashes)\"\noom_percent: \"Percent of fuzzing runs that crashed on OOMs (should be 0)\"\nleak_percent: \"Percent of fuzzing runs that crashed on memory leaks (should be 0)\"\ntimeout_percent: \"Percent of fuzzing runs that 
had testcases timeout (should be 0)\"\nstartup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\navg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\ntotal_fuzzing_time_hrs: \"Total time in hours for which the fuzzer(s) ran. Will be lower if fuzzer hits a crash frequently.\"\nlogs: \"Link to fuzzing logs\"\ncorpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n\n self.stats_columns = \"\"\"_PERFORMANCE_REPORT as perf_report,\nsum(t.number_of_executed_units) as tests_executed,\ncustom(j.new_crashes) as new_crashes,\n_EDGE_COV as edge_coverage,\n_COV_REPORT as cov_report,\n_CORPUS_SIZE as corpus_size,\navg(t.average_exec_per_sec) as avg_exec_per_sec,\navg(t.fuzzing_time_percent) as fuzzing_time_percent,\nsum(t.new_units_added) as new_tests_added,\nsum(t.new_features) as new_features,\navg(t.crash_count*100) as regular_crash_percent,\navg(t.oom_count*100) as oom_percent,\navg(t.leak_count*100) as leak_percent,\navg(t.timeout_count*100) as timeout_percent,\navg(t.startup_crash_count*100) as startup_crash_percent,\navg(t.log_lines_unwanted) as avg_unwanted_log_lines,\nsum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,\n_FUZZER_RUN_LOGS as logs,\n_CORPUS_BACKUP as corpus_backup,\"\"\"\n\n\nclass AflDefaults(BaseBuiltinFuzzerDefaults):\n \"\"\"Default values for AFL.\"\"\"\n\n def __init__(self):\n super(AflDefaults, self).__init__()\n # Override empty values from parent.\n self.name = 'afl'\n self.key_id = 1338\n # Use single quotes since the string ends in a double quote.\n # pylint: disable=line-too-long\n self.stats_column_descriptions = '''fuzzer: \"Fuzz target\"\nnew_crashes: \"Number of new unique crashes observed during this time period\"\nedge_coverage: \"Edge coverage for this fuzz target (number of edges / total)\"\ncov_report: \"Link to coverage report\"\ncorpus_size: \"Size of the minimized corpus generated based on code coverage (number of testcases and total size on disk)\"\navg_exec_per_sec: \"Average number of testcases executed per second\"\nstability: \"Percentage of edges that behave deterministically\"\nnew_tests_added: \"New testcases added to the corpus during fuzzing based on code coverage\"\nregular_crash_percent: \"Percent of fuzzing runs that had regular crashes (other than startup and bad instrumentation crashes)\"\ntimeout_percent: \"Percent of fuzzing runs that had testcases timeout (should be 0)\"\nstartup_crash_percent: \"Percent of fuzzing runs that crashed on startup (should be 0)\"\navg_unwanted_log_lines: \"Average number of unwanted log lines in fuzzing runs (should be 0)\"\ntotal_fuzzing_time_hrs: \"Total time in hours for which the fuzzer(s) ran. 
Will be lower if fuzzer hits a crash frequently.\"\nlogs: \"Link to fuzzing logs\"\ncorpus_backup: \"Backup copy of the minimized corpus generated based on code coverage\"'''\n\n self.stats_columns = \"\"\"custom(j.new_crashes) as new_crashes,\n_EDGE_COV as edge_coverage,\n_COV_REPORT as cov_report,\n_CORPUS_SIZE as corpus_size,\navg(t.average_exec_per_sec) as avg_exec_per_sec,\navg(t.stability) as stability,\nsum(t.new_units_added) as new_tests_added,\navg(t.crash_count*100) as regular_crash_percent,\navg(t.timeout_count*100) as timeout_percent,\navg(t.startup_crash_count*100) as startup_crash_percent,\navg(t.log_lines_unwanted) as avg_unwanted_log_lines,\nsum(t.actual_duration/3600.0) as total_fuzzing_time_hrs,\n_FUZZER_RUN_LOGS as logs,\n_CORPUS_BACKUP as corpus_backup,\"\"\"\n\n\ndef setup_config(non_dry_run):\n \"\"\"Set up configuration.\"\"\"\n config = data_types.Config.query().get()\n if not config:\n config = data_types.Config()\n\n if non_dry_run:\n print('Creating config')\n config.put()\n else:\n print('Skip creating config (dry-run mode)')\n\n\ndef setup_fuzzers(non_dry_run):\n \"\"\"Set up fuzzers.\"\"\"\n for fuzzer_defaults in [AflDefaults(), LibFuzzerDefaults()]:\n fuzzer = data_types.Fuzzer.query(\n data_types.Fuzzer.name == fuzzer_defaults.name).get()\n if fuzzer:\n print(fuzzer_defaults.name, 'fuzzer already exists')\n if non_dry_run:\n print('Updating stats metrics.')\n fuzzer.stats_columns = fuzzer_defaults.stats_columns\n fuzzer.stats_column_descriptions = (\n fuzzer_defaults.stats_column_descriptions)\n fuzzer.put()\n\n continue\n\n if non_dry_run:\n print('Creating fuzzer', fuzzer_defaults.name)\n fuzzer_defaults.create_fuzzer().put()\n else:\n print('Skip creating fuzzer', fuzzer_defaults.name, '(dry-run mode)')\n\n\ndef setup_templates(non_dry_run):\n \"\"\"Set up templates.\"\"\"\n for name, template in six.iteritems(TEMPLATES):\n job = data_types.JobTemplate.query(\n data_types.JobTemplate.name == name).get()\n if job:\n print('Template with name', name, 'already exists.')\n continue\n\n if non_dry_run:\n print('Creating template', name)\n data_types.JobTemplate(name=name, environment_string=template).put()\n else:\n print('Skip creating template', name, '(dry-run mode)')\n\n\ndef setup_metrics(non_dry_run):\n \"\"\"Set up metrics.\"\"\"\n client = monitoring_v3.MetricServiceClient()\n project_name = utils.get_application_id()\n project_path = client.project_path(project_name)\n\n for name in dir(monitoring_metrics):\n metric = getattr(monitoring_metrics, name)\n if not isinstance(metric, monitor.Metric):\n continue\n\n descriptor = monitoring_v3.types.MetricDescriptor()\n metric.monitoring_v3_metric_descriptor(descriptor)\n\n if non_dry_run:\n print('Creating metric', descriptor)\n client.create_metric_descriptor(project_path, descriptor)\n else:\n print('Skip creating metric', descriptor, '(dry-run mode)')\n\n\ndef execute(args):\n \"\"\"Set up initial Datastore models.\"\"\"\n setup_config(args.non_dry_run)\n setup_fuzzers(args.non_dry_run)\n setup_templates(args.non_dry_run)\n\n if not args.local:\n setup_metrics(args.non_dry_run)\n\n print('Done')\n", "path": "src/local/butler/scripts/setup.py"}]}
| 3,687 | 595 |
gh_patches_debug_5983
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-1916
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change iframe call for rendering in Jupyter Book
<!--
Hi!
If you have:
-1 Questions about how to use Nilearn or
-2 Need analysis suggestions & recommendations?
A bunch of fMRI researchers hang out at Neurostars (http://neurostars.org/).
Post those questions there.
Add the tag `nilearn` (we get an email from Neurostars if you do).
Posting them here makes life more complicated for the Nilearn developers.
-->
<!--
For the Feature Request,
Include the following:
------------------------
What would you like changed/added and why?
What would be the benefit? Does the change make something easier to use?
Clarifies something?
If it is a new feature, what is the benefit?
-->
I would like to render interactive brainsprites generated from `view_img` in [Jupyter Book](https://github.com/jupyter/jupyter-book). Jupyter Book is built using Jekyll, and converts executed notebooks to markdown using nbconvert. The nbconvert output is then converted to stylized HTML using kramdown.
My difficulty comes from the fact that kramdown [cannot currently handle unquoted attributes](https://github.com/jupyter/jupyter-book/issues/72) (even though these are valid HTML!). When we generate the iframe in which the brainsprite viewer is embedded here in nilearn, [we don't quote the `width` and `height` attributes](https://github.com/nilearn/nilearn/blob/ed86f29305a264693156826f0e429b2d6281eeaf/nilearn/plotting/js_plotting_utils.py#L126).
I'd like to open a simple fix to quote these two attributes, and smooth nilearn use with Jupyter Book. Would that be alright?
--- END ISSUE ---
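For illustration only, the sketch below shows how the iframe string could be assembled with quoted `width` and `height` attributes; the function name `wrap_in_iframe` and its defaults are hypothetical, not part of nilearn's API, and the actual fix may be worded differently.
```python
# Minimal sketch: quote the width/height values so kramdown (and strict HTML
# parsers) accept the generated iframe markup.
def wrap_in_iframe(escaped_html, width=600, height=400):
    # Quoted attribute values are valid HTML and survive kramdown's parsing.
    return ('<iframe srcdoc="{}" width="{}" height="{}" '
            'frameBorder="0"></iframe>').format(escaped_html, width, height)
```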
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/plotting/js_plotting_utils.py`
Content:
```
1 """
2 Helper functions for views, i.e. interactive plots from html_surface and
3 html_connectome.
4 """
5
6 import os
7 import base64
8 import webbrowser
9 import tempfile
10 import warnings
11 import subprocess
12 from string import Template
13 import weakref
14 try:
15 from html import escape # Unavailable in Py2
16 except ImportError: # Can be removed once we EOL Py2 support for NiLearn
17 from cgi import escape # Deprecated in Py3, necessary for Py2
18
19 import matplotlib as mpl
20 import numpy as np
21 from matplotlib import cm as mpl_cm
22
23 from .._utils.extmath import fast_abs_percentile
24 from .._utils.param_validation import check_threshold
25 from .. import surface
26
27
28 def add_js_lib(html, embed_js=True):
29 """
30 Add javascript libraries to html template.
31
32 if embed_js is True, jquery and plotly are embedded in resulting page.
33 otherwise, they are loaded via CDNs.
34 """
35 js_dir = os.path.join(os.path.dirname(__file__), 'data', 'js')
36 with open(os.path.join(js_dir, 'surface-plot-utils.js')) as f:
37 js_utils = f.read()
38 if not embed_js:
39 js_lib = """
40 <script
41 src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js">
42 </script>
43 <script src="https://cdn.plot.ly/plotly-gl3d-latest.min.js"></script>
44 <script>
45 {}
46 </script>
47 """.format(js_utils)
48 else:
49 with open(os.path.join(js_dir, 'jquery.min.js')) as f:
50 jquery = f.read()
51 with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f:
52 plotly = f.read()
53 js_lib = """
54 <script>{}</script>
55 <script>{}</script>
56 <script>
57 {}
58 </script>
59 """.format(jquery, plotly, js_utils)
60 if not isinstance(html, Template):
61 html = Template(html)
62 return html.safe_substitute({'INSERT_JS_LIBRARIES_HERE': js_lib})
63
64
65 def get_html_template(template_name):
66 """Get an HTML file from package data"""
67 template_path = os.path.join(
68 os.path.dirname(__file__), 'data', 'html', template_name)
69 with open(template_path, 'rb') as f:
70 return Template(f.read().decode('utf-8'))
71
72
73 def _remove_after_n_seconds(file_name, n_seconds):
74 script = os.path.join(os.path.dirname(__file__), 'rm_file.py')
75 subprocess.Popen(['python', script, file_name, str(n_seconds)])
76
77
78 class HTMLDocument(object):
79 """
80 Embeds a plot in a web page.
81
82 If you are running a Jupyter notebook, the plot will be displayed
83 inline if this object is the output of a cell.
84 Otherwise, use open_in_browser() to open it in a web browser (or
85 save_as_html("filename.html") to save it as an html file).
86
87 use str(document) or document.html to get the content of the web page,
88 and document.get_iframe() to have it wrapped in an iframe.
89
90 """
91 _all_open_html_repr = weakref.WeakSet()
92
93 def __init__(self, html, width=600, height=400):
94 self.html = html
95 self.width = width
96 self.height = height
97 self._temp_file = None
98 self._check_n_open()
99
100 def _check_n_open(self):
101 HTMLDocument._all_open_html_repr.add(self)
102 if len(HTMLDocument._all_open_html_repr) > 9:
103 warnings.warn('It seems you have created more than 10 '
104 'nilearn views. As each view uses dozens '
105 'of megabytes of RAM, you might want to '
106 'delete some of them.')
107
108 def resize(self, width, height):
109 """Resize the plot displayed in a Jupyter notebook."""
110 self.width, self.height = width, height
111 return self
112
113 def get_iframe(self, width=None, height=None):
114 """
115 Get the document wrapped in an inline frame.
116
117 For inserting in another HTML page of for display in a Jupyter
118 notebook.
119
120 """
121 if width is None:
122 width = self.width
123 if height is None:
124 height = self.height
125 escaped = escape(self.html, quote=True)
126 wrapped = ('<iframe srcdoc="{}" width={} height={} '
127 'frameBorder="0"></iframe>').format(escaped, width, height)
128 return wrapped
129
130 def get_standalone(self):
131 """ Get the plot in an HTML page."""
132 return self.html
133
134 def _repr_html_(self):
135 """
136 Used by the Jupyter notebook.
137
138 Users normally won't call this method explicitely.
139 """
140 return self.get_iframe()
141
142 def __str__(self):
143 return self.html
144
145 def save_as_html(self, file_name):
146 """
147 Save the plot in an HTML file, that can later be opened in a browser.
148 """
149 with open(file_name, 'wb') as f:
150 f.write(self.html.encode('utf-8'))
151
152 def open_in_browser(self, file_name=None, temp_file_lifetime=30):
153 """
154 Save the plot to a temporary HTML file and open it in a browser.
155
156 Parameters
157 ----------
158
159 file_name : str, optional
160 .html file to use as temporary file
161
162 temp_file_lifetime : float, optional (default=30.)
163 Time, in seconds, after which the temporary file is removed.
164 If None, it is never removed.
165
166 """
167 if file_name is None:
168 fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_')
169 os.close(fd)
170 self.save_as_html(file_name)
171 self._temp_file = file_name
172 file_size = os.path.getsize(file_name) / 1e6
173 if temp_file_lifetime is None:
174 print(("Saved HTML in temporary file: {}\n"
175 "file size is {:.1f}M, delete it when you're done, "
176 "for example by calling this.remove_temp_file").format(
177 file_name, file_size))
178 else:
179 _remove_after_n_seconds(self._temp_file, temp_file_lifetime)
180 webbrowser.open('file://{}'.format(file_name))
181
182 def remove_temp_file(self):
183 """
184 Remove the temporary file created by `open_in_browser`, if necessary.
185 """
186 if self._temp_file is None:
187 return
188 if not os.path.isfile(self._temp_file):
189 return
190 os.remove(self._temp_file)
191 print('removed {}'.format(self._temp_file))
192 self._temp_file = None
193
194
195 def colorscale(cmap, values, threshold=None, symmetric_cmap=True,
196 vmax=None, vmin=None):
197 """Normalize a cmap, put it in plotly format, get threshold and range."""
198 cmap = mpl_cm.get_cmap(cmap)
199 abs_values = np.abs(values)
200 if not symmetric_cmap and (values.min() < 0):
201 warnings.warn('you have specified symmetric_cmap=False '
202 'but the map contains negative values; '
203 'setting symmetric_cmap to True')
204 symmetric_cmap = True
205 if symmetric_cmap and vmin is not None:
206 warnings.warn('vmin cannot be chosen when cmap is symmetric')
207 vmin = None
208 if threshold is not None:
209 if vmin is not None:
210 warnings.warn('choosing both vmin and a threshold is not allowed; '
211 'setting vmin to 0')
212 vmin = 0
213 if vmax is None:
214 vmax = abs_values.max()
215 if symmetric_cmap:
216 vmin = - vmax
217 if vmin is None:
218 vmin = values.min()
219 norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
220 cmaplist = [cmap(i) for i in range(cmap.N)]
221 abs_threshold = None
222 if threshold is not None:
223 abs_threshold = check_threshold(threshold, values, fast_abs_percentile)
224 istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1))
225 istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1))
226 for i in range(istart, istop):
227 cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color
228 our_cmap = mpl.colors.LinearSegmentedColormap.from_list(
229 'Custom cmap', cmaplist, cmap.N)
230 x = np.linspace(0, 1, 100)
231 rgb = our_cmap(x, bytes=True)[:, :3]
232 rgb = np.array(rgb, dtype=int)
233 colors = []
234 for i, col in zip(x, rgb):
235 colors.append([np.round(i, 3), "rgb({}, {}, {})".format(*col)])
236 return {
237 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap,
238 'norm': norm, 'abs_threshold': abs_threshold,
239 'symmetric_cmap': symmetric_cmap
240 }
241
242
243 def encode(a):
244 """Base64 encode a numpy array"""
245 try:
246 data = a.tobytes()
247 except AttributeError:
248 # np < 1.9
249 data = a.tostring()
250 return base64.b64encode(data).decode('utf-8')
251
252
253 def decode(b, dtype):
254 """Decode a numpy array encoded as Base64"""
255 return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype)
256
257
258 def mesh_to_plotly(mesh):
259 mesh = surface.load_surf_mesh(mesh)
260 x, y, z = map(encode, np.asarray(mesh[0].T, dtype='<f4'))
261 i, j, k = map(encode, np.asarray(mesh[1].T, dtype='<i4'))
262 info = {
263 "_x": x,
264 "_y": y,
265 "_z": z,
266 "_i": i,
267 "_j": j,
268 "_k": k,
269 }
270 return info
271
272
273 def to_color_strings(colors):
274 cmap = mpl.colors.ListedColormap(colors)
275 colors = cmap(np.arange(cmap.N))[:, :3]
276 colors = np.asarray(colors * 255, dtype='uint8')
277 colors = ['#{:02x}{:02x}{:02x}'.format(*row) for row in colors]
278 return colors
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/plotting/js_plotting_utils.py b/nilearn/plotting/js_plotting_utils.py
--- a/nilearn/plotting/js_plotting_utils.py
+++ b/nilearn/plotting/js_plotting_utils.py
@@ -123,7 +123,7 @@
if height is None:
height = self.height
escaped = escape(self.html, quote=True)
- wrapped = ('<iframe srcdoc="{}" width={} height={} '
+ wrapped = ('<iframe srcdoc="{}" width="{}" height="{}" '
'frameBorder="0"></iframe>').format(escaped, width, height)
return wrapped
|
{"golden_diff": "diff --git a/nilearn/plotting/js_plotting_utils.py b/nilearn/plotting/js_plotting_utils.py\n--- a/nilearn/plotting/js_plotting_utils.py\n+++ b/nilearn/plotting/js_plotting_utils.py\n@@ -123,7 +123,7 @@\n if height is None:\n height = self.height\n escaped = escape(self.html, quote=True)\n- wrapped = ('<iframe srcdoc=\"{}\" width={} height={} '\n+ wrapped = ('<iframe srcdoc=\"{}\" width=\"{}\" height=\"{}\" '\n 'frameBorder=\"0\"></iframe>').format(escaped, width, height)\n return wrapped\n", "issue": "Change iframe call for rendering in Jupyter Book\n<!--\r\nHi! \r\nIf you have: \r\n -1 Questions about how to use Nilearn or \r\n -2 Need analysis suggestions & recommendations?\r\n\r\nA bunch of fMRI researchers hang out at Neurostars (http://neurostars.org/). \r\nPost those questions there. \r\nAdd the tag `nilearn` (we get an email from Neurostars if you do).\r\n\r\nPosting them here makes life more complicated for the Nilearn developers. \r\n-->\r\n\r\n<!-- \r\nFor the Feature Request,\r\nInclude the following:\r\n------------------------\r\nWhat would you like changed/added and why?\r\nWhat would be the benefit? Does the change make something easier to use?\r\nClarifies something?\r\nIf it is a new feature, what is the benefit? \r\n-->\r\n\r\nI would like to render interactive brainsprites generated from `view_img` in [Jupyter Book](https://github.com/jupyter/jupyter-book). Jupyter Book is built using Jekyll, and converts executed notebooks to markdown using nbconvert. The nbconvert is then converted to stylized HTML using kramdown.\r\n\r\nMy difficulty comes that kramdown [cannot currently handle unquoted attributes](https://github.com/jupyter/jupyter-book/issues/72) (even though these are valid HTML!). When we generate the iframe in which the brainsprite viewer is embedded here in nilearn, [we don't quote the `width` and `height` attributes](https://github.com/nilearn/nilearn/blob/ed86f29305a264693156826f0e429b2d6281eeaf/nilearn/plotting/js_plotting_utils.py#L126).\r\n\r\nI'd like to open a simple fix to quote these two attributes, and smooth nilearn use with Jupyter Book. Would that be alright?\r\n\n", "before_files": [{"content": "\"\"\"\nHelper functions for views, i.e. interactive plots from html_surface and\nhtml_connectome.\n\"\"\"\n\nimport os\nimport base64\nimport webbrowser\nimport tempfile\nimport warnings\nimport subprocess\nfrom string import Template\nimport weakref\ntry:\n from html import escape # Unavailable in Py2\nexcept ImportError: # Can be removed once we EOL Py2 support for NiLearn\n from cgi import escape # Deprecated in Py3, necessary for Py2\n\nimport matplotlib as mpl\nimport numpy as np\nfrom matplotlib import cm as mpl_cm\n\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.param_validation import check_threshold\nfrom .. 
import surface\n\n\ndef add_js_lib(html, embed_js=True):\n \"\"\"\n Add javascript libraries to html template.\n\n if embed_js is True, jquery and plotly are embedded in resulting page.\n otherwise, they are loaded via CDNs.\n \"\"\"\n js_dir = os.path.join(os.path.dirname(__file__), 'data', 'js')\n with open(os.path.join(js_dir, 'surface-plot-utils.js')) as f:\n js_utils = f.read()\n if not embed_js:\n js_lib = \"\"\"\n <script\n src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\">\n </script>\n <script src=\"https://cdn.plot.ly/plotly-gl3d-latest.min.js\"></script>\n <script>\n {}\n </script>\n \"\"\".format(js_utils)\n else:\n with open(os.path.join(js_dir, 'jquery.min.js')) as f:\n jquery = f.read()\n with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f:\n plotly = f.read()\n js_lib = \"\"\"\n <script>{}</script>\n <script>{}</script>\n <script>\n {}\n </script>\n \"\"\".format(jquery, plotly, js_utils)\n if not isinstance(html, Template):\n html = Template(html)\n return html.safe_substitute({'INSERT_JS_LIBRARIES_HERE': js_lib})\n\n\ndef get_html_template(template_name):\n \"\"\"Get an HTML file from package data\"\"\"\n template_path = os.path.join(\n os.path.dirname(__file__), 'data', 'html', template_name)\n with open(template_path, 'rb') as f:\n return Template(f.read().decode('utf-8'))\n\n\ndef _remove_after_n_seconds(file_name, n_seconds):\n script = os.path.join(os.path.dirname(__file__), 'rm_file.py')\n subprocess.Popen(['python', script, file_name, str(n_seconds)])\n\n\nclass HTMLDocument(object):\n \"\"\"\n Embeds a plot in a web page.\n\n If you are running a Jupyter notebook, the plot will be displayed\n inline if this object is the output of a cell.\n Otherwise, use open_in_browser() to open it in a web browser (or\n save_as_html(\"filename.html\") to save it as an html file).\n\n use str(document) or document.html to get the content of the web page,\n and document.get_iframe() to have it wrapped in an iframe.\n\n \"\"\"\n _all_open_html_repr = weakref.WeakSet()\n\n def __init__(self, html, width=600, height=400):\n self.html = html\n self.width = width\n self.height = height\n self._temp_file = None\n self._check_n_open()\n\n def _check_n_open(self):\n HTMLDocument._all_open_html_repr.add(self)\n if len(HTMLDocument._all_open_html_repr) > 9:\n warnings.warn('It seems you have created more than 10 '\n 'nilearn views. 
As each view uses dozens '\n 'of megabytes of RAM, you might want to '\n 'delete some of them.')\n\n def resize(self, width, height):\n \"\"\"Resize the plot displayed in a Jupyter notebook.\"\"\"\n self.width, self.height = width, height\n return self\n\n def get_iframe(self, width=None, height=None):\n \"\"\"\n Get the document wrapped in an inline frame.\n\n For inserting in another HTML page of for display in a Jupyter\n notebook.\n\n \"\"\"\n if width is None:\n width = self.width\n if height is None:\n height = self.height\n escaped = escape(self.html, quote=True)\n wrapped = ('<iframe srcdoc=\"{}\" width={} height={} '\n 'frameBorder=\"0\"></iframe>').format(escaped, width, height)\n return wrapped\n\n def get_standalone(self):\n \"\"\" Get the plot in an HTML page.\"\"\"\n return self.html\n\n def _repr_html_(self):\n \"\"\"\n Used by the Jupyter notebook.\n\n Users normally won't call this method explicitely.\n \"\"\"\n return self.get_iframe()\n\n def __str__(self):\n return self.html\n\n def save_as_html(self, file_name):\n \"\"\"\n Save the plot in an HTML file, that can later be opened in a browser.\n \"\"\"\n with open(file_name, 'wb') as f:\n f.write(self.html.encode('utf-8'))\n\n def open_in_browser(self, file_name=None, temp_file_lifetime=30):\n \"\"\"\n Save the plot to a temporary HTML file and open it in a browser.\n\n Parameters\n ----------\n\n file_name : str, optional\n .html file to use as temporary file\n\n temp_file_lifetime : float, optional (default=30.)\n Time, in seconds, after which the temporary file is removed.\n If None, it is never removed.\n\n \"\"\"\n if file_name is None:\n fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_')\n os.close(fd)\n self.save_as_html(file_name)\n self._temp_file = file_name\n file_size = os.path.getsize(file_name) / 1e6\n if temp_file_lifetime is None:\n print((\"Saved HTML in temporary file: {}\\n\"\n \"file size is {:.1f}M, delete it when you're done, \"\n \"for example by calling this.remove_temp_file\").format(\n file_name, file_size))\n else:\n _remove_after_n_seconds(self._temp_file, temp_file_lifetime)\n webbrowser.open('file://{}'.format(file_name))\n\n def remove_temp_file(self):\n \"\"\"\n Remove the temporary file created by `open_in_browser`, if necessary.\n \"\"\"\n if self._temp_file is None:\n return\n if not os.path.isfile(self._temp_file):\n return\n os.remove(self._temp_file)\n print('removed {}'.format(self._temp_file))\n self._temp_file = None\n\n\ndef colorscale(cmap, values, threshold=None, symmetric_cmap=True,\n vmax=None, vmin=None):\n \"\"\"Normalize a cmap, put it in plotly format, get threshold and range.\"\"\"\n cmap = mpl_cm.get_cmap(cmap)\n abs_values = np.abs(values)\n if not symmetric_cmap and (values.min() < 0):\n warnings.warn('you have specified symmetric_cmap=False '\n 'but the map contains negative values; '\n 'setting symmetric_cmap to True')\n symmetric_cmap = True\n if symmetric_cmap and vmin is not None:\n warnings.warn('vmin cannot be chosen when cmap is symmetric')\n vmin = None\n if threshold is not None:\n if vmin is not None:\n warnings.warn('choosing both vmin and a threshold is not allowed; '\n 'setting vmin to 0')\n vmin = 0\n if vmax is None:\n vmax = abs_values.max()\n if symmetric_cmap:\n vmin = - vmax\n if vmin is None:\n vmin = values.min()\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n cmaplist = [cmap(i) for i in range(cmap.N)]\n abs_threshold = None\n if threshold is not None:\n abs_threshold = check_threshold(threshold, values, 
fast_abs_percentile)\n istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1))\n istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1))\n for i in range(istart, istop):\n cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color\n our_cmap = mpl.colors.LinearSegmentedColormap.from_list(\n 'Custom cmap', cmaplist, cmap.N)\n x = np.linspace(0, 1, 100)\n rgb = our_cmap(x, bytes=True)[:, :3]\n rgb = np.array(rgb, dtype=int)\n colors = []\n for i, col in zip(x, rgb):\n colors.append([np.round(i, 3), \"rgb({}, {}, {})\".format(*col)])\n return {\n 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap,\n 'norm': norm, 'abs_threshold': abs_threshold,\n 'symmetric_cmap': symmetric_cmap\n }\n\n\ndef encode(a):\n \"\"\"Base64 encode a numpy array\"\"\"\n try:\n data = a.tobytes()\n except AttributeError:\n # np < 1.9\n data = a.tostring()\n return base64.b64encode(data).decode('utf-8')\n\n\ndef decode(b, dtype):\n \"\"\"Decode a numpy array encoded as Base64\"\"\"\n return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype)\n\n\ndef mesh_to_plotly(mesh):\n mesh = surface.load_surf_mesh(mesh)\n x, y, z = map(encode, np.asarray(mesh[0].T, dtype='<f4'))\n i, j, k = map(encode, np.asarray(mesh[1].T, dtype='<i4'))\n info = {\n \"_x\": x,\n \"_y\": y,\n \"_z\": z,\n \"_i\": i,\n \"_j\": j,\n \"_k\": k,\n }\n return info\n\n\ndef to_color_strings(colors):\n cmap = mpl.colors.ListedColormap(colors)\n colors = cmap(np.arange(cmap.N))[:, :3]\n colors = np.asarray(colors * 255, dtype='uint8')\n colors = ['#{:02x}{:02x}{:02x}'.format(*row) for row in colors]\n return colors\n", "path": "nilearn/plotting/js_plotting_utils.py"}], "after_files": [{"content": "\"\"\"\nHelper functions for views, i.e. interactive plots from html_surface and\nhtml_connectome.\n\"\"\"\n\nimport os\nimport base64\nimport webbrowser\nimport tempfile\nimport warnings\nimport subprocess\nfrom string import Template\nimport weakref\ntry:\n from html import escape # Unavailable in Py2\nexcept ImportError: # Can be removed once we EOL Py2 support for NiLearn\n from cgi import escape # Deprecated in Py3, necessary for Py2\n\nimport matplotlib as mpl\nimport numpy as np\nfrom matplotlib import cm as mpl_cm\n\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.param_validation import check_threshold\nfrom .. 
import surface\n\n\ndef add_js_lib(html, embed_js=True):\n \"\"\"\n Add javascript libraries to html template.\n\n if embed_js is True, jquery and plotly are embedded in resulting page.\n otherwise, they are loaded via CDNs.\n \"\"\"\n js_dir = os.path.join(os.path.dirname(__file__), 'data', 'js')\n with open(os.path.join(js_dir, 'surface-plot-utils.js')) as f:\n js_utils = f.read()\n if not embed_js:\n js_lib = \"\"\"\n <script\n src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\">\n </script>\n <script src=\"https://cdn.plot.ly/plotly-gl3d-latest.min.js\"></script>\n <script>\n {}\n </script>\n \"\"\".format(js_utils)\n else:\n with open(os.path.join(js_dir, 'jquery.min.js')) as f:\n jquery = f.read()\n with open(os.path.join(js_dir, 'plotly-gl3d-latest.min.js')) as f:\n plotly = f.read()\n js_lib = \"\"\"\n <script>{}</script>\n <script>{}</script>\n <script>\n {}\n </script>\n \"\"\".format(jquery, plotly, js_utils)\n if not isinstance(html, Template):\n html = Template(html)\n return html.safe_substitute({'INSERT_JS_LIBRARIES_HERE': js_lib})\n\n\ndef get_html_template(template_name):\n \"\"\"Get an HTML file from package data\"\"\"\n template_path = os.path.join(\n os.path.dirname(__file__), 'data', 'html', template_name)\n with open(template_path, 'rb') as f:\n return Template(f.read().decode('utf-8'))\n\n\ndef _remove_after_n_seconds(file_name, n_seconds):\n script = os.path.join(os.path.dirname(__file__), 'rm_file.py')\n subprocess.Popen(['python', script, file_name, str(n_seconds)])\n\n\nclass HTMLDocument(object):\n \"\"\"\n Embeds a plot in a web page.\n\n If you are running a Jupyter notebook, the plot will be displayed\n inline if this object is the output of a cell.\n Otherwise, use open_in_browser() to open it in a web browser (or\n save_as_html(\"filename.html\") to save it as an html file).\n\n use str(document) or document.html to get the content of the web page,\n and document.get_iframe() to have it wrapped in an iframe.\n\n \"\"\"\n _all_open_html_repr = weakref.WeakSet()\n\n def __init__(self, html, width=600, height=400):\n self.html = html\n self.width = width\n self.height = height\n self._temp_file = None\n self._check_n_open()\n\n def _check_n_open(self):\n HTMLDocument._all_open_html_repr.add(self)\n if len(HTMLDocument._all_open_html_repr) > 9:\n warnings.warn('It seems you have created more than 10 '\n 'nilearn views. 
As each view uses dozens '\n 'of megabytes of RAM, you might want to '\n 'delete some of them.')\n\n def resize(self, width, height):\n \"\"\"Resize the plot displayed in a Jupyter notebook.\"\"\"\n self.width, self.height = width, height\n return self\n\n def get_iframe(self, width=None, height=None):\n \"\"\"\n Get the document wrapped in an inline frame.\n\n For inserting in another HTML page of for display in a Jupyter\n notebook.\n\n \"\"\"\n if width is None:\n width = self.width\n if height is None:\n height = self.height\n escaped = escape(self.html, quote=True)\n wrapped = ('<iframe srcdoc=\"{}\" width=\"{}\" height=\"{}\" '\n 'frameBorder=\"0\"></iframe>').format(escaped, width, height)\n return wrapped\n\n def get_standalone(self):\n \"\"\" Get the plot in an HTML page.\"\"\"\n return self.html\n\n def _repr_html_(self):\n \"\"\"\n Used by the Jupyter notebook.\n\n Users normally won't call this method explicitely.\n \"\"\"\n return self.get_iframe()\n\n def __str__(self):\n return self.html\n\n def save_as_html(self, file_name):\n \"\"\"\n Save the plot in an HTML file, that can later be opened in a browser.\n \"\"\"\n with open(file_name, 'wb') as f:\n f.write(self.html.encode('utf-8'))\n\n def open_in_browser(self, file_name=None, temp_file_lifetime=30):\n \"\"\"\n Save the plot to a temporary HTML file and open it in a browser.\n\n Parameters\n ----------\n\n file_name : str, optional\n .html file to use as temporary file\n\n temp_file_lifetime : float, optional (default=30.)\n Time, in seconds, after which the temporary file is removed.\n If None, it is never removed.\n\n \"\"\"\n if file_name is None:\n fd, file_name = tempfile.mkstemp('.html', 'nilearn_surface_plot_')\n os.close(fd)\n self.save_as_html(file_name)\n self._temp_file = file_name\n file_size = os.path.getsize(file_name) / 1e6\n if temp_file_lifetime is None:\n print((\"Saved HTML in temporary file: {}\\n\"\n \"file size is {:.1f}M, delete it when you're done, \"\n \"for example by calling this.remove_temp_file\").format(\n file_name, file_size))\n else:\n _remove_after_n_seconds(self._temp_file, temp_file_lifetime)\n webbrowser.open('file://{}'.format(file_name))\n\n def remove_temp_file(self):\n \"\"\"\n Remove the temporary file created by `open_in_browser`, if necessary.\n \"\"\"\n if self._temp_file is None:\n return\n if not os.path.isfile(self._temp_file):\n return\n os.remove(self._temp_file)\n print('removed {}'.format(self._temp_file))\n self._temp_file = None\n\n\ndef colorscale(cmap, values, threshold=None, symmetric_cmap=True,\n vmax=None, vmin=None):\n \"\"\"Normalize a cmap, put it in plotly format, get threshold and range.\"\"\"\n cmap = mpl_cm.get_cmap(cmap)\n abs_values = np.abs(values)\n if not symmetric_cmap and (values.min() < 0):\n warnings.warn('you have specified symmetric_cmap=False '\n 'but the map contains negative values; '\n 'setting symmetric_cmap to True')\n symmetric_cmap = True\n if symmetric_cmap and vmin is not None:\n warnings.warn('vmin cannot be chosen when cmap is symmetric')\n vmin = None\n if threshold is not None:\n if vmin is not None:\n warnings.warn('choosing both vmin and a threshold is not allowed; '\n 'setting vmin to 0')\n vmin = 0\n if vmax is None:\n vmax = abs_values.max()\n if symmetric_cmap:\n vmin = - vmax\n if vmin is None:\n vmin = values.min()\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n cmaplist = [cmap(i) for i in range(cmap.N)]\n abs_threshold = None\n if threshold is not None:\n abs_threshold = check_threshold(threshold, values, 
fast_abs_percentile)\n istart = int(norm(-abs_threshold, clip=True) * (cmap.N - 1))\n istop = int(norm(abs_threshold, clip=True) * (cmap.N - 1))\n for i in range(istart, istop):\n cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color\n our_cmap = mpl.colors.LinearSegmentedColormap.from_list(\n 'Custom cmap', cmaplist, cmap.N)\n x = np.linspace(0, 1, 100)\n rgb = our_cmap(x, bytes=True)[:, :3]\n rgb = np.array(rgb, dtype=int)\n colors = []\n for i, col in zip(x, rgb):\n colors.append([np.round(i, 3), \"rgb({}, {}, {})\".format(*col)])\n return {\n 'colors': colors, 'vmin': vmin, 'vmax': vmax, 'cmap': our_cmap,\n 'norm': norm, 'abs_threshold': abs_threshold,\n 'symmetric_cmap': symmetric_cmap\n }\n\n\ndef encode(a):\n \"\"\"Base64 encode a numpy array\"\"\"\n try:\n data = a.tobytes()\n except AttributeError:\n # np < 1.9\n data = a.tostring()\n return base64.b64encode(data).decode('utf-8')\n\n\ndef decode(b, dtype):\n \"\"\"Decode a numpy array encoded as Base64\"\"\"\n return np.frombuffer(base64.b64decode(b.encode('utf-8')), dtype)\n\n\ndef mesh_to_plotly(mesh):\n mesh = surface.load_surf_mesh(mesh)\n x, y, z = map(encode, np.asarray(mesh[0].T, dtype='<f4'))\n i, j, k = map(encode, np.asarray(mesh[1].T, dtype='<i4'))\n info = {\n \"_x\": x,\n \"_y\": y,\n \"_z\": z,\n \"_i\": i,\n \"_j\": j,\n \"_k\": k,\n }\n return info\n\n\ndef to_color_strings(colors):\n cmap = mpl.colors.ListedColormap(colors)\n colors = cmap(np.arange(cmap.N))[:, :3]\n colors = np.asarray(colors * 255, dtype='uint8')\n colors = ['#{:02x}{:02x}{:02x}'.format(*row) for row in colors]\n return colors\n", "path": "nilearn/plotting/js_plotting_utils.py"}]}
| 3,686 | 151 |
gh_patches_debug_38021
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-2010
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pyinstaller lock exe file while running on windows
Version of python: 2.7 32bit
Version of pyinstaller: downloaded from git
How to reproduce:
1: create 1.py, content:
```
import time
time.sleep(99999)
```
2: pack 1.py to 1.exe, run
```
pyinstaller.py 1.py
```
3: execute 1.exe
Then 1.exe cannot be renamed while running.
I tested py2exe and cx_Freeze; they don't have this problem.
I think it's because PyInstaller doesn't close the file handle of the exe after reading what it needs.
PyInstaller uses fopen, not CreateFile, so it gets the default sharing behavior.
Another problem is that PyInstaller inherits file handles to child processes, but that is not necessary;
I already tested changing py_utils.c:520 to FALSE, which is OK,
but the problem is still not resolved.
http://en.wikipedia.org/wiki/File_locking
http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858%28v=vs.85%29.aspx
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682425%28v=vs.85%29.aspx
If you still have trouble reading this, please contact me instead of closing the issue; otherwise I will open another one.
--- END ISSUE ---
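For illustration only, the sketch below shows the kind of open-read-close access pattern the reporter is asking for, so that no long-lived handle keeps the executable locked on Windows; `read_at` is a hypothetical helper, not an existing PyInstaller function.
```python
# Minimal sketch: open the archive file only for the duration of a single read,
# then close it, so the .exe is not held open (and locked) between accesses.
def read_at(path, offset, length):
    with open(path, 'rb') as f:  # handle is released as soon as the block exits
        f.seek(offset)
        return f.read(length)
```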
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/loader/pyimod02_archive.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 # TODO clean up this module
11
12 # Subclasses may not need marshal or struct, but since they're
13 # builtin, importing is safe.
14 #
15 # While an Archive is really an abstraction for any "filesystem
16 # within a file", it is tuned for use with imputil.FuncImporter.
17 # This assumes it contains python code objects, indexed by the
18 # the internal name (ie, no '.py').
19
20 # See pyi_carchive.py for a more general archive (contains anything)
21 # that can be understood by a C program.
22
23
24 ### **NOTE** This module is used during bootstrap.
25 ### Import *ONLY* builtin modules.
26
27 import marshal
28 import struct
29 import sys
30 import zlib
31
32
33 # For decrypting Python modules.
34 CRYPT_BLOCK_SIZE = 16
35
36
37 # content types for PYZ
38 PYZ_TYPE_MODULE = 0
39 PYZ_TYPE_PKG = 1
40 PYZ_TYPE_DATA = 2
41
42
43 class ArchiveFile(object):
44 """
45 File class support auto open when access member from file object
46 This class is use to avoid file locking on windows
47 """
48
49 def __init__(self, *args, **kwargs):
50 self.args = args
51 self.kwargs = kwargs
52 self.pos = 0
53 self.fd = None
54 self.__open()
55
56 def __getattr__(self, name):
57 """
58 Auto open file when access member from file object
59 This function only call when member of name not exist in self
60 """
61 assert self.fd
62 return getattr(self.fd, name)
63
64 def __open(self):
65 """
66 Open file and seek to pos record from last close
67 """
68 if self.fd is None:
69 self.fd = open(*self.args, **self.kwargs)
70 self.fd.seek(self.pos)
71
72 def __enter__(self):
73 self.__open()
74
75 def __exit__(self, type, value, traceback):
76 assert self.fd
77 self.close()
78
79 def close(self):
80 """
81 Close file and record pos
82 """
83 if self.fd is not None:
84 self.pos = self.fd.tell()
85 self.fd.close()
86 self.fd = None
87
88
89 class ArchiveReadError(RuntimeError):
90 pass
91
92
93 class ArchiveReader(object):
94 """
95 A base class for a repository of python code objects.
96 The extract method is used by imputil.ArchiveImporter
97 to get code objects by name (fully qualified name), so
98 an enduser "import a.b" would become
99 extract('a.__init__')
100 extract('a.b')
101 """
102 MAGIC = b'PYL\0'
103 HDRLEN = 12 # default is MAGIC followed by python's magic, int pos of toc
104 TOCPOS = 8
105 os = None
106 _bincache = None
107
108 def __init__(self, path=None, start=0):
109 """
110 Initialize an Archive. If path is omitted, it will be an empty Archive.
111 """
112 self.toc = None
113 self.path = path
114 self.start = start
115
116 # In Python 3 module 'imp' is no longer built-in and we cannot use it.
117 # There is for Python 3 another way how to obtain magic value.
118 if sys.version_info[0] == 2:
119 import imp
120 self.pymagic = imp.get_magic()
121 else:
122 # We cannot use at this bootstrap stage importlib directly
123 # but its frozen variant.
124 import _frozen_importlib
125 if sys.version_info[1] <= 3:
126 # Python 3.3
127 self.pymagic = _frozen_importlib._MAGIC_BYTES
128 elif sys.version_info[1] == 4:
129 # Python 3.4
130 self.pymagic = _frozen_importlib.MAGIC_NUMBER
131 else:
132 # Python 3.5+
133 self.pymagic = _frozen_importlib._bootstrap_external.MAGIC_NUMBER
134
135 if path is not None:
136 self.lib = ArchiveFile(self.path, 'rb')
137 with self.lib:
138 self.checkmagic()
139 self.loadtoc()
140
141
142 def loadtoc(self):
143 """
144 Overridable.
145 Default: After magic comes an int (4 byte native) giving the
146 position of the TOC within self.lib.
147 Default: The TOC is a marshal-able string.
148 """
149 self.lib.seek(self.start + self.TOCPOS)
150 (offset,) = struct.unpack('!i', self.lib.read(4))
151 self.lib.seek(self.start + offset)
152 # Use marshal.loads() since load() arg must be a file object
153 # Convert the read list into a dict for faster access
154 self.toc = dict(marshal.loads(self.lib.read()))
155
156 ######## This is what is called by FuncImporter #######
157 ## Since an Archive is flat, we ignore parent and modname.
158 #XXX obsolete - imputil only code
159 ## def get_code(self, parent, modname, fqname):
160 ## pass
161
162 ####### Core method - Override as needed #########
163 def extract(self, name):
164 """
165 Get the object corresponding to name, or None.
166 For use with imputil ArchiveImporter, object is a python code object.
167 'name' is the name as specified in an 'import name'.
168 'import a.b' will become:
169 extract('a') (return None because 'a' is not a code object)
170 extract('a.__init__') (return a code object)
171 extract('a.b') (return a code object)
172 Default implementation:
173 self.toc is a dict
174 self.toc[name] is pos
175 self.lib has the code object marshal-ed at pos
176 """
177 ispkg, pos = self.toc.get(name, (0, None))
178 if pos is None:
179 return None
180 with self.lib:
181 self.lib.seek(self.start + pos)
182 # use marshal.loads() sind load() arg must be a file object
183 obj = marshal.loads(self.lib.read())
184 return ispkg, obj
185
186 ########################################################################
187 # Informational methods
188
189 def contents(self):
190 """
191 Return a list of the contents
192 Default implementation assumes self.toc is a dict like object.
193 Not required by ArchiveImporter.
194 """
195 return list(self.toc.keys())
196
197 def checkmagic(self):
198 """
199 Overridable.
200 Check to see if the file object self.lib actually has a file
201 we understand.
202 """
203 self.lib.seek(self.start) # default - magic is at start of file
204
205 if self.lib.read(len(self.MAGIC)) != self.MAGIC:
206 raise ArchiveReadError("%s is not a valid %s archive file"
207 % (self.path, self.__class__.__name__))
208
209 if self.lib.read(len(self.pymagic)) != self.pymagic:
210 raise ArchiveReadError("%s has version mismatch to dll" %
211 (self.path))
212
213 self.lib.read(4)
214
215
216 class Cipher(object):
217 """
218 This class is used only to decrypt Python modules.
219 """
220 def __init__(self):
221 # At build-type the key is given to us from inside the spec file, at
222 # bootstrap-time, we must look for it ourselves by trying to import
223 # the generated 'pyi_crypto_key' module.
224 import pyimod00_crypto_key
225 key = pyimod00_crypto_key.key
226
227 assert type(key) is str
228 if len(key) > CRYPT_BLOCK_SIZE:
229 self.key = key[0:CRYPT_BLOCK_SIZE]
230 else:
231 self.key = key.zfill(CRYPT_BLOCK_SIZE)
232 assert len(self.key) == CRYPT_BLOCK_SIZE
233
234 # Import the right AES module.
235 self._aes = self._import_aesmod()
236
237 def _import_aesmod(self):
238 """
239 Tries to import the AES module from PyCrypto.
240
241 PyCrypto 2.4 and 2.6 uses different name of the AES extension.
242 """
243 # Not-so-easy way: at bootstrap time we have to load the module from the
244 # temporary directory in a manner similar to pyi_importers.CExtensionImporter.
245 from pyimod03_importers import CExtensionImporter
246 importer = CExtensionImporter()
247 # NOTE: We _must_ call find_module first.
248 # The _AES.so module exists only in PyCrypto 2.6 and later. Try to import
249 # that first.
250 modname = 'Crypto.Cipher._AES'
251 mod = importer.find_module(modname)
252 # Fallback to AES.so, which should be there in PyCrypto 2.4 and earlier.
253 if not mod:
254 modname = 'Crypto.Cipher.AES'
255 mod = importer.find_module(modname)
256 if not mod:
257 # Raise import error if none of the AES modules is found.
258 raise ImportError(modname)
259 mod = mod.load_module(modname)
260 # Issue #1663: Remove the AES module from sys.modules list. Otherwise
261 # it interferes with using 'Crypto.Cipher' module in users' code.
262 if modname in sys.modules:
263 del sys.modules[modname]
264 return mod
265
266 def __create_cipher(self, iv):
267 # The 'BlockAlgo' class is stateful, this factory method is used to
268 # re-initialize the block cipher class with each call to encrypt() and
269 # decrypt().
270 return self._aes.new(self.key, self._aes.MODE_CFB, iv)
271
272 def decrypt(self, data):
273 return self.__create_cipher(data[:CRYPT_BLOCK_SIZE]).decrypt(data[CRYPT_BLOCK_SIZE:])
274
275
276 class ZlibArchiveReader(ArchiveReader):
277 """
278 ZlibArchive - an archive with compressed entries. Archive is read
279 from the executable created by PyInstaller.
280
281 This archive is used for bundling python modules inside the executable.
282
283 NOTE: The whole ZlibArchive (PYZ) is compressed so it is not necessary
284 to compress single modules with zlib.
285 """
286 MAGIC = b'PYZ\0'
287 TOCPOS = 8
288 HDRLEN = ArchiveReader.HDRLEN + 5
289
290 def __init__(self, path=None, offset=None):
291 if path is None:
292 offset = 0
293 elif offset is None:
294 for i in range(len(path) - 1, - 1, - 1):
295 if path[i] == '?':
296 try:
297 offset = int(path[i + 1:])
298 except ValueError:
299 # Just ignore any spurious "?" in the path
300 # (like in Windows UNC \\?\<path>).
301 continue
302 path = path[:i]
303 break
304 else:
305 offset = 0
306
307 super(ZlibArchiveReader, self).__init__(path, offset)
308
309 # Try to import the key module. If the key module is not available
310 # then it means that encryption is disabled.
311 try:
312 import pyimod00_crypto_key
313 self.cipher = Cipher()
314 except ImportError:
315 self.cipher = None
316
317 def extract(self, name):
318 (typ, pos, length) = self.toc.get(name, (0, None, 0))
319 if pos is None:
320 return None
321 with self.lib:
322 self.lib.seek(self.start + pos)
323 obj = self.lib.read(length)
324 try:
325 if self.cipher:
326 obj = self.cipher.decrypt(obj)
327 obj = zlib.decompress(obj)
328 if typ in (PYZ_TYPE_MODULE, PYZ_TYPE_PKG):
329 obj = marshal.loads(obj)
330 except EOFError:
331 raise ImportError("PYZ entry '%s' failed to unmarshal" % name)
332 return typ, obj
333
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/loader/pyimod02_archive.py b/PyInstaller/loader/pyimod02_archive.py
--- a/PyInstaller/loader/pyimod02_archive.py
+++ b/PyInstaller/loader/pyimod02_archive.py
@@ -28,6 +28,10 @@
import struct
import sys
import zlib
+if sys.version_info[0] == 2:
+ import thread
+else:
+ import _thread as thread
# For decrypting Python modules.
@@ -39,6 +43,17 @@
PYZ_TYPE_PKG = 1
PYZ_TYPE_DATA = 2
+class FilePos(object):
+ """
+ This class keeps track of the file object representing and current position
+ in a file.
+ """
+ def __init__(self):
+ # The file object representing this file.
+ self.file = None
+ # The position in the file when it was last closed.
+ self.pos = 0
+
class ArchiveFile(object):
"""
@@ -49,41 +64,51 @@
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
- self.pos = 0
- self.fd = None
- self.__open()
+ self._filePos = {}
- def __getattr__(self, name):
+ def local(self):
"""
- Auto open file when access member from file object
- This function only call when member of name not exist in self
+ Return an instance of FilePos for the current thread. This is a crude
+ # re-implementation of threading.local, which isn't a built-in module
+ # and therefore isn't available.
"""
- assert self.fd
- return getattr(self.fd, name)
+ ti = thread.get_ident()
+ if ti not in self._filePos:
+ self._filePos[ti] = FilePos()
+ return self._filePos[ti]
- def __open(self):
+ def __getattr__(self, name):
"""
- Open file and seek to pos record from last close
+ Make this class act like a file, by invoking most methods on its
+ underlying file object.
"""
- if self.fd is None:
- self.fd = open(*self.args, **self.kwargs)
- self.fd.seek(self.pos)
+ file = self.local().file
+ assert file
+ return getattr(file, name)
def __enter__(self):
- self.__open()
+ """
+ Open file and seek to pos record from last close.
+ """
+ # The file shouldn't be open yet.
+ fp = self.local()
+ assert not fp.file
+ # Open the file and seek to the last position.
+ fp.file = open(*self.args, **self.kwargs)
+ fp.file.seek(fp.pos)
def __exit__(self, type, value, traceback):
- assert self.fd
- self.close()
-
- def close(self):
"""
- Close file and record pos
+ Close file and record pos.
"""
- if self.fd is not None:
- self.pos = self.fd.tell()
- self.fd.close()
- self.fd = None
+ # The file should still be open.
+ fp = self.local()
+ assert fp.file
+
+ # Close the file and record its position.
+ fp.pos = fp.file.tell()
+ fp.file.close()
+ fp.file = None
class ArchiveReadError(RuntimeError):
|
{"golden_diff": "diff --git a/PyInstaller/loader/pyimod02_archive.py b/PyInstaller/loader/pyimod02_archive.py\n--- a/PyInstaller/loader/pyimod02_archive.py\n+++ b/PyInstaller/loader/pyimod02_archive.py\n@@ -28,6 +28,10 @@\n import struct\n import sys\n import zlib\n+if sys.version_info[0] == 2:\n+ import thread\n+else:\n+ import _thread as thread\n \n \n # For decrypting Python modules.\n@@ -39,6 +43,17 @@\n PYZ_TYPE_PKG = 1\n PYZ_TYPE_DATA = 2\n \n+class FilePos(object):\n+ \"\"\"\n+ This class keeps track of the file object representing and current position\n+ in a file.\n+ \"\"\"\n+ def __init__(self):\n+ # The file object representing this file.\n+ self.file = None\n+ # The position in the file when it was last closed.\n+ self.pos = 0\n+\n \n class ArchiveFile(object):\n \"\"\"\n@@ -49,41 +64,51 @@\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n- self.pos = 0\n- self.fd = None\n- self.__open()\n+ self._filePos = {}\n \n- def __getattr__(self, name):\n+ def local(self):\n \"\"\"\n- Auto open file when access member from file object\n- This function only call when member of name not exist in self\n+ Return an instance of FilePos for the current thread. This is a crude\n+ # re-implementation of threading.local, which isn't a built-in module\n+ # and therefore isn't available.\n \"\"\"\n- assert self.fd\n- return getattr(self.fd, name)\n+ ti = thread.get_ident()\n+ if ti not in self._filePos:\n+ self._filePos[ti] = FilePos()\n+ return self._filePos[ti]\n \n- def __open(self):\n+ def __getattr__(self, name):\n \"\"\"\n- Open file and seek to pos record from last close\n+ Make this class act like a file, by invoking most methods on its\n+ underlying file object.\n \"\"\"\n- if self.fd is None:\n- self.fd = open(*self.args, **self.kwargs)\n- self.fd.seek(self.pos)\n+ file = self.local().file\n+ assert file\n+ return getattr(file, name)\n \n def __enter__(self):\n- self.__open()\n+ \"\"\"\n+ Open file and seek to pos record from last close.\n+ \"\"\"\n+ # The file shouldn't be open yet.\n+ fp = self.local()\n+ assert not fp.file\n+ # Open the file and seek to the last position.\n+ fp.file = open(*self.args, **self.kwargs)\n+ fp.file.seek(fp.pos)\n \n def __exit__(self, type, value, traceback):\n- assert self.fd\n- self.close()\n-\n- def close(self):\n \"\"\"\n- Close file and record pos\n+ Close file and record pos.\n \"\"\"\n- if self.fd is not None:\n- self.pos = self.fd.tell()\n- self.fd.close()\n- self.fd = None\n+ # The file should still be open.\n+ fp = self.local()\n+ assert fp.file\n+\n+ # Close the file and record its position.\n+ fp.pos = fp.file.tell()\n+ fp.file.close()\n+ fp.file = None\n \n \n class ArchiveReadError(RuntimeError):\n", "issue": "Pyinstaller lock exe file while running on windows\nVersion of python: 2.7 32bit\nVersion of pyinstall: download from git\n\nHow to reproduce:\n1: create 1.py, content:\n\n```\nimport time\ntime.sleep(99999)\n```\n\n2: pack 1.py to 1.exe, run\n\n```\npyinstaller.py 1.py\n```\n\n3: execute 1.exe\n\nThen 1.exe cannot be rename while running.\n\nI test py2exe and cx_Freeze they don't have this problem.\nI think it's because pyinstaller didn't close file handle of exe after read it needs.\n\nPyinstaller use fopen not CreateFile so it has the default behavior.\nother problem is pyinstaller will inhert file handles to child process but it's not necessarily,\nI already test change py_utils.c:520 to FALSE is ok,\nbut problem still not 
resolved.\n\nhttp://en.wikipedia.org/wiki/File_locking\nhttp://msdn.microsoft.com/en-us/library/windows/desktop/aa363858%28v=vs.85%29.aspx\nhttp://msdn.microsoft.com/en-us/library/windows/desktop/ms682425%28v=vs.85%29.aspx\n\nIf you still have problem to read this please contact me instead of close, otherwise I will open other again.\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n# TODO clean up this module\n\n# Subclasses may not need marshal or struct, but since they're\n# builtin, importing is safe.\n#\n# While an Archive is really an abstraction for any \"filesystem\n# within a file\", it is tuned for use with imputil.FuncImporter.\n# This assumes it contains python code objects, indexed by the\n# the internal name (ie, no '.py').\n\n# See pyi_carchive.py for a more general archive (contains anything)\n# that can be understood by a C program.\n\n\n### **NOTE** This module is used during bootstrap.\n### Import *ONLY* builtin modules.\n\nimport marshal\nimport struct\nimport sys\nimport zlib\n\n\n# For decrypting Python modules.\nCRYPT_BLOCK_SIZE = 16\n\n\n# content types for PYZ\nPYZ_TYPE_MODULE = 0\nPYZ_TYPE_PKG = 1\nPYZ_TYPE_DATA = 2\n\n\nclass ArchiveFile(object):\n \"\"\"\n File class support auto open when access member from file object\n This class is use to avoid file locking on windows\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self.pos = 0\n self.fd = None\n self.__open()\n\n def __getattr__(self, name):\n \"\"\"\n Auto open file when access member from file object\n This function only call when member of name not exist in self\n \"\"\"\n assert self.fd\n return getattr(self.fd, name)\n\n def __open(self):\n \"\"\"\n Open file and seek to pos record from last close\n \"\"\"\n if self.fd is None:\n self.fd = open(*self.args, **self.kwargs)\n self.fd.seek(self.pos)\n\n def __enter__(self):\n self.__open()\n\n def __exit__(self, type, value, traceback):\n assert self.fd\n self.close()\n\n def close(self):\n \"\"\"\n Close file and record pos\n \"\"\"\n if self.fd is not None:\n self.pos = self.fd.tell()\n self.fd.close()\n self.fd = None\n\n\nclass ArchiveReadError(RuntimeError):\n pass\n\n\nclass ArchiveReader(object):\n \"\"\"\n A base class for a repository of python code objects.\n The extract method is used by imputil.ArchiveImporter\n to get code objects by name (fully qualified name), so\n an enduser \"import a.b\" would become\n extract('a.__init__')\n extract('a.b')\n \"\"\"\n MAGIC = b'PYL\\0'\n HDRLEN = 12 # default is MAGIC followed by python's magic, int pos of toc\n TOCPOS = 8\n os = None\n _bincache = None\n\n def __init__(self, path=None, start=0):\n \"\"\"\n Initialize an Archive. 
If path is omitted, it will be an empty Archive.\n \"\"\"\n self.toc = None\n self.path = path\n self.start = start\n\n # In Python 3 module 'imp' is no longer built-in and we cannot use it.\n # There is for Python 3 another way how to obtain magic value.\n if sys.version_info[0] == 2:\n import imp\n self.pymagic = imp.get_magic()\n else:\n # We cannot use at this bootstrap stage importlib directly\n # but its frozen variant.\n import _frozen_importlib\n if sys.version_info[1] <= 3:\n # Python 3.3\n self.pymagic = _frozen_importlib._MAGIC_BYTES\n elif sys.version_info[1] == 4:\n # Python 3.4\n self.pymagic = _frozen_importlib.MAGIC_NUMBER\n else:\n # Python 3.5+\n self.pymagic = _frozen_importlib._bootstrap_external.MAGIC_NUMBER\n\n if path is not None:\n self.lib = ArchiveFile(self.path, 'rb')\n with self.lib:\n self.checkmagic()\n self.loadtoc()\n\n\n def loadtoc(self):\n \"\"\"\n Overridable.\n Default: After magic comes an int (4 byte native) giving the\n position of the TOC within self.lib.\n Default: The TOC is a marshal-able string.\n \"\"\"\n self.lib.seek(self.start + self.TOCPOS)\n (offset,) = struct.unpack('!i', self.lib.read(4))\n self.lib.seek(self.start + offset)\n # Use marshal.loads() since load() arg must be a file object\n # Convert the read list into a dict for faster access\n self.toc = dict(marshal.loads(self.lib.read()))\n\n ######## This is what is called by FuncImporter #######\n ## Since an Archive is flat, we ignore parent and modname.\n #XXX obsolete - imputil only code\n ## def get_code(self, parent, modname, fqname):\n ## pass\n\n ####### Core method - Override as needed #########\n def extract(self, name):\n \"\"\"\n Get the object corresponding to name, or None.\n For use with imputil ArchiveImporter, object is a python code object.\n 'name' is the name as specified in an 'import name'.\n 'import a.b' will become:\n extract('a') (return None because 'a' is not a code object)\n extract('a.__init__') (return a code object)\n extract('a.b') (return a code object)\n Default implementation:\n self.toc is a dict\n self.toc[name] is pos\n self.lib has the code object marshal-ed at pos\n \"\"\"\n ispkg, pos = self.toc.get(name, (0, None))\n if pos is None:\n return None\n with self.lib:\n self.lib.seek(self.start + pos)\n # use marshal.loads() sind load() arg must be a file object\n obj = marshal.loads(self.lib.read())\n return ispkg, obj\n\n ########################################################################\n # Informational methods\n\n def contents(self):\n \"\"\"\n Return a list of the contents\n Default implementation assumes self.toc is a dict like object.\n Not required by ArchiveImporter.\n \"\"\"\n return list(self.toc.keys())\n\n def checkmagic(self):\n \"\"\"\n Overridable.\n Check to see if the file object self.lib actually has a file\n we understand.\n \"\"\"\n self.lib.seek(self.start) # default - magic is at start of file\n\n if self.lib.read(len(self.MAGIC)) != self.MAGIC:\n raise ArchiveReadError(\"%s is not a valid %s archive file\"\n % (self.path, self.__class__.__name__))\n\n if self.lib.read(len(self.pymagic)) != self.pymagic:\n raise ArchiveReadError(\"%s has version mismatch to dll\" %\n (self.path))\n\n self.lib.read(4)\n\n\nclass Cipher(object):\n \"\"\"\n This class is used only to decrypt Python modules.\n \"\"\"\n def __init__(self):\n # At build-type the key is given to us from inside the spec file, at\n # bootstrap-time, we must look for it ourselves by trying to import\n # the generated 'pyi_crypto_key' module.\n import 
pyimod00_crypto_key\n key = pyimod00_crypto_key.key\n\n assert type(key) is str\n if len(key) > CRYPT_BLOCK_SIZE:\n self.key = key[0:CRYPT_BLOCK_SIZE]\n else:\n self.key = key.zfill(CRYPT_BLOCK_SIZE)\n assert len(self.key) == CRYPT_BLOCK_SIZE\n\n # Import the right AES module.\n self._aes = self._import_aesmod()\n\n def _import_aesmod(self):\n \"\"\"\n Tries to import the AES module from PyCrypto.\n\n PyCrypto 2.4 and 2.6 uses different name of the AES extension.\n \"\"\"\n # Not-so-easy way: at bootstrap time we have to load the module from the\n # temporary directory in a manner similar to pyi_importers.CExtensionImporter.\n from pyimod03_importers import CExtensionImporter\n importer = CExtensionImporter()\n # NOTE: We _must_ call find_module first.\n # The _AES.so module exists only in PyCrypto 2.6 and later. Try to import\n # that first.\n modname = 'Crypto.Cipher._AES'\n mod = importer.find_module(modname)\n # Fallback to AES.so, which should be there in PyCrypto 2.4 and earlier.\n if not mod:\n modname = 'Crypto.Cipher.AES'\n mod = importer.find_module(modname)\n if not mod:\n # Raise import error if none of the AES modules is found.\n raise ImportError(modname)\n mod = mod.load_module(modname)\n # Issue #1663: Remove the AES module from sys.modules list. Otherwise\n # it interferes with using 'Crypto.Cipher' module in users' code.\n if modname in sys.modules:\n del sys.modules[modname]\n return mod\n\n def __create_cipher(self, iv):\n # The 'BlockAlgo' class is stateful, this factory method is used to\n # re-initialize the block cipher class with each call to encrypt() and\n # decrypt().\n return self._aes.new(self.key, self._aes.MODE_CFB, iv)\n\n def decrypt(self, data):\n return self.__create_cipher(data[:CRYPT_BLOCK_SIZE]).decrypt(data[CRYPT_BLOCK_SIZE:])\n\n\nclass ZlibArchiveReader(ArchiveReader):\n \"\"\"\n ZlibArchive - an archive with compressed entries. Archive is read\n from the executable created by PyInstaller.\n\n This archive is used for bundling python modules inside the executable.\n\n NOTE: The whole ZlibArchive (PYZ) is compressed so it is not necessary\n to compress single modules with zlib.\n \"\"\"\n MAGIC = b'PYZ\\0'\n TOCPOS = 8\n HDRLEN = ArchiveReader.HDRLEN + 5\n\n def __init__(self, path=None, offset=None):\n if path is None:\n offset = 0\n elif offset is None:\n for i in range(len(path) - 1, - 1, - 1):\n if path[i] == '?':\n try:\n offset = int(path[i + 1:])\n except ValueError:\n # Just ignore any spurious \"?\" in the path\n # (like in Windows UNC \\\\?\\<path>).\n continue\n path = path[:i]\n break\n else:\n offset = 0\n\n super(ZlibArchiveReader, self).__init__(path, offset)\n\n # Try to import the key module. 
If the key module is not available\n # then it means that encryption is disabled.\n try:\n import pyimod00_crypto_key\n self.cipher = Cipher()\n except ImportError:\n self.cipher = None\n\n def extract(self, name):\n (typ, pos, length) = self.toc.get(name, (0, None, 0))\n if pos is None:\n return None\n with self.lib:\n self.lib.seek(self.start + pos)\n obj = self.lib.read(length)\n try:\n if self.cipher:\n obj = self.cipher.decrypt(obj)\n obj = zlib.decompress(obj)\n if typ in (PYZ_TYPE_MODULE, PYZ_TYPE_PKG):\n obj = marshal.loads(obj)\n except EOFError:\n raise ImportError(\"PYZ entry '%s' failed to unmarshal\" % name)\n return typ, obj\n", "path": "PyInstaller/loader/pyimod02_archive.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n# TODO clean up this module\n\n# Subclasses may not need marshal or struct, but since they're\n# builtin, importing is safe.\n#\n# While an Archive is really an abstraction for any \"filesystem\n# within a file\", it is tuned for use with imputil.FuncImporter.\n# This assumes it contains python code objects, indexed by the\n# the internal name (ie, no '.py').\n\n# See pyi_carchive.py for a more general archive (contains anything)\n# that can be understood by a C program.\n\n\n### **NOTE** This module is used during bootstrap.\n### Import *ONLY* builtin modules.\n\nimport marshal\nimport struct\nimport sys\nimport zlib\nif sys.version_info[0] == 2:\n import thread\nelse:\n import _thread as thread\n\n\n# For decrypting Python modules.\nCRYPT_BLOCK_SIZE = 16\n\n\n# content types for PYZ\nPYZ_TYPE_MODULE = 0\nPYZ_TYPE_PKG = 1\nPYZ_TYPE_DATA = 2\n\nclass FilePos(object):\n \"\"\"\n This class keeps track of the file object representing and current position\n in a file.\n \"\"\"\n def __init__(self):\n # The file object representing this file.\n self.file = None\n # The position in the file when it was last closed.\n self.pos = 0\n\n\nclass ArchiveFile(object):\n \"\"\"\n File class support auto open when access member from file object\n This class is use to avoid file locking on windows\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self._filePos = {}\n\n def local(self):\n \"\"\"\n Return an instance of FilePos for the current thread. 
This is a crude\n # re-implementation of threading.local, which isn't a built-in module\n # and therefore isn't available.\n \"\"\"\n ti = thread.get_ident()\n if ti not in self._filePos:\n self._filePos[ti] = FilePos()\n return self._filePos[ti]\n\n def __getattr__(self, name):\n \"\"\"\n Make this class act like a file, by invoking most methods on its\n underlying file object.\n \"\"\"\n file = self.local().file\n assert file\n return getattr(file, name)\n\n def __enter__(self):\n \"\"\"\n Open file and seek to pos record from last close.\n \"\"\"\n # The file shouldn't be open yet.\n fp = self.local()\n assert not fp.file\n # Open the file and seek to the last position.\n fp.file = open(*self.args, **self.kwargs)\n fp.file.seek(fp.pos)\n\n def __exit__(self, type, value, traceback):\n \"\"\"\n Close file and record pos.\n \"\"\"\n # The file should still be open.\n fp = self.local()\n assert fp.file\n\n # Close the file and record its position.\n fp.pos = fp.file.tell()\n fp.file.close()\n fp.file = None\n\n\nclass ArchiveReadError(RuntimeError):\n pass\n\n\nclass ArchiveReader(object):\n \"\"\"\n A base class for a repository of python code objects.\n The extract method is used by imputil.ArchiveImporter\n to get code objects by name (fully qualified name), so\n an enduser \"import a.b\" would become\n extract('a.__init__')\n extract('a.b')\n \"\"\"\n MAGIC = b'PYL\\0'\n HDRLEN = 12 # default is MAGIC followed by python's magic, int pos of toc\n TOCPOS = 8\n os = None\n _bincache = None\n\n def __init__(self, path=None, start=0):\n \"\"\"\n Initialize an Archive. If path is omitted, it will be an empty Archive.\n \"\"\"\n self.toc = None\n self.path = path\n self.start = start\n\n # In Python 3 module 'imp' is no longer built-in and we cannot use it.\n # There is for Python 3 another way how to obtain magic value.\n if sys.version_info[0] == 2:\n import imp\n self.pymagic = imp.get_magic()\n else:\n # We cannot use at this bootstrap stage importlib directly\n # but its frozen variant.\n import _frozen_importlib\n if sys.version_info[1] <= 3:\n # Python 3.3\n self.pymagic = _frozen_importlib._MAGIC_BYTES\n elif sys.version_info[1] == 4:\n # Python 3.4\n self.pymagic = _frozen_importlib.MAGIC_NUMBER\n else:\n # Python 3.5+\n self.pymagic = _frozen_importlib._bootstrap_external.MAGIC_NUMBER\n\n if path is not None:\n self.lib = ArchiveFile(self.path, 'rb')\n with self.lib:\n self.checkmagic()\n self.loadtoc()\n\n\n def loadtoc(self):\n \"\"\"\n Overridable.\n Default: After magic comes an int (4 byte native) giving the\n position of the TOC within self.lib.\n Default: The TOC is a marshal-able string.\n \"\"\"\n self.lib.seek(self.start + self.TOCPOS)\n (offset,) = struct.unpack('!i', self.lib.read(4))\n self.lib.seek(self.start + offset)\n # Use marshal.loads() since load() arg must be a file object\n # Convert the read list into a dict for faster access\n self.toc = dict(marshal.loads(self.lib.read()))\n\n ######## This is what is called by FuncImporter #######\n ## Since an Archive is flat, we ignore parent and modname.\n #XXX obsolete - imputil only code\n ## def get_code(self, parent, modname, fqname):\n ## pass\n\n ####### Core method - Override as needed #########\n def extract(self, name):\n \"\"\"\n Get the object corresponding to name, or None.\n For use with imputil ArchiveImporter, object is a python code object.\n 'name' is the name as specified in an 'import name'.\n 'import a.b' will become:\n extract('a') (return None because 'a' is not a code object)\n 
extract('a.__init__') (return a code object)\n extract('a.b') (return a code object)\n Default implementation:\n self.toc is a dict\n self.toc[name] is pos\n self.lib has the code object marshal-ed at pos\n \"\"\"\n ispkg, pos = self.toc.get(name, (0, None))\n if pos is None:\n return None\n with self.lib:\n self.lib.seek(self.start + pos)\n # use marshal.loads() sind load() arg must be a file object\n obj = marshal.loads(self.lib.read())\n return ispkg, obj\n\n ########################################################################\n # Informational methods\n\n def contents(self):\n \"\"\"\n Return a list of the contents\n Default implementation assumes self.toc is a dict like object.\n Not required by ArchiveImporter.\n \"\"\"\n return list(self.toc.keys())\n\n def checkmagic(self):\n \"\"\"\n Overridable.\n Check to see if the file object self.lib actually has a file\n we understand.\n \"\"\"\n self.lib.seek(self.start) # default - magic is at start of file\n\n if self.lib.read(len(self.MAGIC)) != self.MAGIC:\n raise ArchiveReadError(\"%s is not a valid %s archive file\"\n % (self.path, self.__class__.__name__))\n\n if self.lib.read(len(self.pymagic)) != self.pymagic:\n raise ArchiveReadError(\"%s has version mismatch to dll\" %\n (self.path))\n\n self.lib.read(4)\n\n\nclass Cipher(object):\n \"\"\"\n This class is used only to decrypt Python modules.\n \"\"\"\n def __init__(self):\n # At build-type the key is given to us from inside the spec file, at\n # bootstrap-time, we must look for it ourselves by trying to import\n # the generated 'pyi_crypto_key' module.\n import pyimod00_crypto_key\n key = pyimod00_crypto_key.key\n\n assert type(key) is str\n if len(key) > CRYPT_BLOCK_SIZE:\n self.key = key[0:CRYPT_BLOCK_SIZE]\n else:\n self.key = key.zfill(CRYPT_BLOCK_SIZE)\n assert len(self.key) == CRYPT_BLOCK_SIZE\n\n # Import the right AES module.\n self._aes = self._import_aesmod()\n\n def _import_aesmod(self):\n \"\"\"\n Tries to import the AES module from PyCrypto.\n\n PyCrypto 2.4 and 2.6 uses different name of the AES extension.\n \"\"\"\n # Not-so-easy way: at bootstrap time we have to load the module from the\n # temporary directory in a manner similar to pyi_importers.CExtensionImporter.\n from pyimod03_importers import CExtensionImporter\n importer = CExtensionImporter()\n # NOTE: We _must_ call find_module first.\n # The _AES.so module exists only in PyCrypto 2.6 and later. Try to import\n # that first.\n modname = 'Crypto.Cipher._AES'\n mod = importer.find_module(modname)\n # Fallback to AES.so, which should be there in PyCrypto 2.4 and earlier.\n if not mod:\n modname = 'Crypto.Cipher.AES'\n mod = importer.find_module(modname)\n if not mod:\n # Raise import error if none of the AES modules is found.\n raise ImportError(modname)\n mod = mod.load_module(modname)\n # Issue #1663: Remove the AES module from sys.modules list. Otherwise\n # it interferes with using 'Crypto.Cipher' module in users' code.\n if modname in sys.modules:\n del sys.modules[modname]\n return mod\n\n def __create_cipher(self, iv):\n # The 'BlockAlgo' class is stateful, this factory method is used to\n # re-initialize the block cipher class with each call to encrypt() and\n # decrypt().\n return self._aes.new(self.key, self._aes.MODE_CFB, iv)\n\n def decrypt(self, data):\n return self.__create_cipher(data[:CRYPT_BLOCK_SIZE]).decrypt(data[CRYPT_BLOCK_SIZE:])\n\n\nclass ZlibArchiveReader(ArchiveReader):\n \"\"\"\n ZlibArchive - an archive with compressed entries. 
Archive is read\n from the executable created by PyInstaller.\n\n This archive is used for bundling python modules inside the executable.\n\n NOTE: The whole ZlibArchive (PYZ) is compressed so it is not necessary\n to compress single modules with zlib.\n \"\"\"\n MAGIC = b'PYZ\\0'\n TOCPOS = 8\n HDRLEN = ArchiveReader.HDRLEN + 5\n\n def __init__(self, path=None, offset=None):\n if path is None:\n offset = 0\n elif offset is None:\n for i in range(len(path) - 1, - 1, - 1):\n if path[i] == '?':\n try:\n offset = int(path[i + 1:])\n except ValueError:\n # Just ignore any spurious \"?\" in the path\n # (like in Windows UNC \\\\?\\<path>).\n continue\n path = path[:i]\n break\n else:\n offset = 0\n\n super(ZlibArchiveReader, self).__init__(path, offset)\n\n # Try to import the key module. If the key module is not available\n # then it means that encryption is disabled.\n try:\n import pyimod00_crypto_key\n self.cipher = Cipher()\n except ImportError:\n self.cipher = None\n\n def extract(self, name):\n (typ, pos, length) = self.toc.get(name, (0, None, 0))\n if pos is None:\n return None\n with self.lib:\n self.lib.seek(self.start + pos)\n obj = self.lib.read(length)\n try:\n if self.cipher:\n obj = self.cipher.decrypt(obj)\n obj = zlib.decompress(obj)\n if typ in (PYZ_TYPE_MODULE, PYZ_TYPE_PKG):\n obj = marshal.loads(obj)\n except EOFError:\n raise ImportError(\"PYZ entry '%s' failed to unmarshal\" % name)\n return typ, obj\n", "path": "PyInstaller/loader/pyimod02_archive.py"}]}
| 4,045 | 801 |
gh_patches_debug_37530
|
rasdani/github-patches
|
git_diff
|
keras-team__autokeras-286
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a class to generate an MLP
<!---
Please label your issue with `new_task_module`.
-->
### Suggested Name
<!---
-->
MlpGenerator
### Task Description
<!---
A clear and concise description of the machine learning task to be added, its problem statement and learning outcome.
-->
Add a class named MlpGenerator. Create a superclass that would be inherited by CnnGenerator and MlpGenerator.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/constant.py`
Content:
```
1 class Constant:
2 # Data
3
4 VALIDATION_SET_SIZE = 0.08333
5
6 # Searcher
7
8 MAX_MODEL_NUM = 1000
9 BETA = 2.576
10 KERNEL_LAMBDA = 0.1
11 T_MIN = 0.0001
12 N_NEIGHBOURS = 8
13 MAX_MODEL_SIZE = (1 << 25)
14 MAX_LAYER_WIDTH = 4096
15 MAX_LAYERS = 100
16
17 # Model Defaults
18
19 DENSE_DROPOUT_RATE = 0.5
20 CONV_DROPOUT_RATE = 0.25
21 CONV_BLOCK_DISTANCE = 2
22 DENSE_BLOCK_DISTANCE = 1
23 MODEL_LEN = 3
24 MODEL_WIDTH = 64
25
26 # ModelTrainer
27
28 DATA_AUGMENTATION = True
29 MAX_ITER_NUM = 200
30 MIN_LOSS_DEC = 1e-4
31 MAX_NO_IMPROVEMENT_NUM = 5
32 MAX_BATCH_SIZE = 128
33 LIMIT_MEMORY = False
34 SEARCH_MAX_ITER = 200
35
36 # text preprocessor
37
38 EMBEDDING_DIM = 100
39 MAX_SEQUENCE_LENGTH = 400
40 MAX_NB_WORDS = 5000
41 EXTRACT_PATH = "glove/"
42 # Download file name
43 FILE_PATH = "glove.zip"
44 PRE_TRAIN_FILE_LINK = "http://nlp.stanford.edu/data/glove.6B.zip"
45 PRE_TRAIN_FILE_NAME = "glove.6B.100d.txt"
46
```
Path: `autokeras/nn/generator.py`
Content:
```
1 from autokeras.constant import Constant
2 from autokeras.nn.graph import Graph
3 from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \
4 StubReLU, StubGlobalPooling
5
6
7 class CnnGenerator:
8 def __init__(self, n_output_node, input_shape):
9 self.n_output_node = n_output_node
10 self.input_shape = input_shape
11 if len(self.input_shape) > 4:
12 raise ValueError('The input dimension is too high.')
13 if len(self.input_shape) < 2:
14 raise ValueError('The input dimension is too low.')
15
16 def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):
17 pooling_len = int(model_len / 4)
18 graph = Graph(self.input_shape, False)
19 temp_input_channel = self.input_shape[-1]
20 output_node_id = 0
21 for i in range(model_len):
22 output_node_id = graph.add_layer(StubReLU(), output_node_id)
23 output_node_id = graph.add_layer(StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)
24 output_node_id = graph.add_layer(StubBatchNormalization(model_width), output_node_id)
25 temp_input_channel = model_width
26 if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
27 output_node_id = graph.add_layer(StubPooling(), output_node_id)
28
29 output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)
30 output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)
31 output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),
32 output_node_id)
33 output_node_id = graph.add_layer(StubReLU(), output_node_id)
34 graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
35 return graph
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/autokeras/constant.py b/autokeras/constant.py
--- a/autokeras/constant.py
+++ b/autokeras/constant.py
@@ -18,9 +18,12 @@
DENSE_DROPOUT_RATE = 0.5
CONV_DROPOUT_RATE = 0.25
+ MLP_DROPOUT_RATE = 0.25
CONV_BLOCK_DISTANCE = 2
DENSE_BLOCK_DISTANCE = 1
MODEL_LEN = 3
+ MLP_MODEL_LEN = 3
+ MLP_MODEL_WIDTH = 5
MODEL_WIDTH = 64
# ModelTrainer
diff --git a/autokeras/nn/generator.py b/autokeras/nn/generator.py
--- a/autokeras/nn/generator.py
+++ b/autokeras/nn/generator.py
@@ -2,12 +2,22 @@
from autokeras.nn.graph import Graph
from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \
StubReLU, StubGlobalPooling
+from abc import abstractmethod
-class CnnGenerator:
+class NetworkGenerator:
def __init__(self, n_output_node, input_shape):
self.n_output_node = n_output_node
self.input_shape = input_shape
+
+ @abstractmethod
+ def generate(self, model_len, model_width):
+ pass
+
+
+class CnnGenerator(NetworkGenerator):
+ def __init__(self, n_output_node, input_shape):
+ super(CnnGenerator, self).__init__(n_output_node, input_shape)
if len(self.input_shape) > 4:
raise ValueError('The input dimension is too high.')
if len(self.input_shape) < 2:
@@ -33,3 +43,28 @@
output_node_id = graph.add_layer(StubReLU(), output_node_id)
graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
return graph
+
+
+class MlpGenerator(NetworkGenerator):
+ def __init__(self, n_output_node, input_shape):
+ super(MlpGenerator, self).__init__(n_output_node, input_shape)
+ if len(self.input_shape) > 1:
+ raise ValueError('The input dimension is too high.')
+
+ def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):
+ if type(model_width) is list and not len(model_width) == model_len:
+ raise ValueError('The length of \'model_width\' does not match \'model_len\'')
+ elif type(model_width) is int:
+ model_width = [model_width] * model_len
+
+ graph = Graph(self.input_shape[0], False)
+ output_node_id = 0
+ n_nodes_prev_layer = self.input_shape[0]
+ for width in model_width:
+ output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
+ output_node_id = graph.add_layer(StubDropout(Constant.MLP_DROPOUT_RATE), output_node_id)
+ output_node_id = graph.add_layer(StubReLU(), output_node_id)
+ n_nodes_prev_layer = width
+
+ graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
+ return graph
|
{"golden_diff": "diff --git a/autokeras/constant.py b/autokeras/constant.py\n--- a/autokeras/constant.py\n+++ b/autokeras/constant.py\n@@ -18,9 +18,12 @@\n \n DENSE_DROPOUT_RATE = 0.5\n CONV_DROPOUT_RATE = 0.25\n+ MLP_DROPOUT_RATE = 0.25\n CONV_BLOCK_DISTANCE = 2\n DENSE_BLOCK_DISTANCE = 1\n MODEL_LEN = 3\n+ MLP_MODEL_LEN = 3\n+ MLP_MODEL_WIDTH = 5\n MODEL_WIDTH = 64\n \n # ModelTrainer\ndiff --git a/autokeras/nn/generator.py b/autokeras/nn/generator.py\n--- a/autokeras/nn/generator.py\n+++ b/autokeras/nn/generator.py\n@@ -2,12 +2,22 @@\n from autokeras.nn.graph import Graph\n from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \\\n StubReLU, StubGlobalPooling\n+from abc import abstractmethod\n \n \n-class CnnGenerator:\n+class NetworkGenerator:\n def __init__(self, n_output_node, input_shape):\n self.n_output_node = n_output_node\n self.input_shape = input_shape\n+\n+ @abstractmethod\n+ def generate(self, model_len, model_width):\n+ pass\n+\n+\n+class CnnGenerator(NetworkGenerator):\n+ def __init__(self, n_output_node, input_shape):\n+ super(CnnGenerator, self).__init__(n_output_node, input_shape)\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n@@ -33,3 +43,28 @@\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n return graph\n+\n+\n+class MlpGenerator(NetworkGenerator):\n+ def __init__(self, n_output_node, input_shape):\n+ super(MlpGenerator, self).__init__(n_output_node, input_shape)\n+ if len(self.input_shape) > 1:\n+ raise ValueError('The input dimension is too high.')\n+\n+ def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):\n+ if type(model_width) is list and not len(model_width) == model_len:\n+ raise ValueError('The length of \\'model_width\\' does not match \\'model_len\\'')\n+ elif type(model_width) is int:\n+ model_width = [model_width] * model_len\n+\n+ graph = Graph(self.input_shape[0], False)\n+ output_node_id = 0\n+ n_nodes_prev_layer = self.input_shape[0]\n+ for width in model_width:\n+ output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)\n+ output_node_id = graph.add_layer(StubDropout(Constant.MLP_DROPOUT_RATE), output_node_id)\n+ output_node_id = graph.add_layer(StubReLU(), output_node_id)\n+ n_nodes_prev_layer = width\n+\n+ graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)\n+ return graph\n", "issue": "Add a class to generate an MLP\n<!---\r\nPlease label your issue with `new_task_module`.\r\n-->\r\n\r\n### Suggested Name\r\n<!---\r\n-->\r\nMlpGenerator\r\n\r\n### Task Description\r\n<!---\r\nA clear and concise description of the machine learning task to be added, its problem statement and learning outcome.\r\n-->\r\nAdd a class named MlpGenerator. 
Create a superclass that would be inherited by CnnGenerator and MlpGenerator.\r\n\n", "before_files": [{"content": "class Constant:\n # Data\n\n VALIDATION_SET_SIZE = 0.08333\n\n # Searcher\n\n MAX_MODEL_NUM = 1000\n BETA = 2.576\n KERNEL_LAMBDA = 0.1\n T_MIN = 0.0001\n N_NEIGHBOURS = 8\n MAX_MODEL_SIZE = (1 << 25)\n MAX_LAYER_WIDTH = 4096\n MAX_LAYERS = 100\n\n # Model Defaults\n\n DENSE_DROPOUT_RATE = 0.5\n CONV_DROPOUT_RATE = 0.25\n CONV_BLOCK_DISTANCE = 2\n DENSE_BLOCK_DISTANCE = 1\n MODEL_LEN = 3\n MODEL_WIDTH = 64\n\n # ModelTrainer\n\n DATA_AUGMENTATION = True\n MAX_ITER_NUM = 200\n MIN_LOSS_DEC = 1e-4\n MAX_NO_IMPROVEMENT_NUM = 5\n MAX_BATCH_SIZE = 128\n LIMIT_MEMORY = False\n SEARCH_MAX_ITER = 200\n\n # text preprocessor\n\n EMBEDDING_DIM = 100\n MAX_SEQUENCE_LENGTH = 400\n MAX_NB_WORDS = 5000\n EXTRACT_PATH = \"glove/\"\n # Download file name\n FILE_PATH = \"glove.zip\"\n PRE_TRAIN_FILE_LINK = \"http://nlp.stanford.edu/data/glove.6B.zip\"\n PRE_TRAIN_FILE_NAME = \"glove.6B.100d.txt\"\n", "path": "autokeras/constant.py"}, {"content": "from autokeras.constant import Constant\nfrom autokeras.nn.graph import Graph\nfrom autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \\\n StubReLU, StubGlobalPooling\n\n\nclass CnnGenerator:\n def __init__(self, n_output_node, input_shape):\n self.n_output_node = n_output_node\n self.input_shape = input_shape\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n raise ValueError('The input dimension is too low.')\n\n def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):\n pooling_len = int(model_len / 4)\n graph = Graph(self.input_shape, False)\n temp_input_channel = self.input_shape[-1]\n output_node_id = 0\n for i in range(model_len):\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n output_node_id = graph.add_layer(StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)\n output_node_id = graph.add_layer(StubBatchNormalization(model_width), output_node_id)\n temp_input_channel = model_width\n if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):\n output_node_id = graph.add_layer(StubPooling(), output_node_id)\n\n output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)\n output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)\n output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),\n output_node_id)\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n return graph\n", "path": "autokeras/nn/generator.py"}], "after_files": [{"content": "class Constant:\n # Data\n\n VALIDATION_SET_SIZE = 0.08333\n\n # Searcher\n\n MAX_MODEL_NUM = 1000\n BETA = 2.576\n KERNEL_LAMBDA = 0.1\n T_MIN = 0.0001\n N_NEIGHBOURS = 8\n MAX_MODEL_SIZE = (1 << 25)\n MAX_LAYER_WIDTH = 4096\n MAX_LAYERS = 100\n\n # Model Defaults\n\n DENSE_DROPOUT_RATE = 0.5\n CONV_DROPOUT_RATE = 0.25\n MLP_DROPOUT_RATE = 0.25\n CONV_BLOCK_DISTANCE = 2\n DENSE_BLOCK_DISTANCE = 1\n MODEL_LEN = 3\n MLP_MODEL_LEN = 3\n MLP_MODEL_WIDTH = 5\n MODEL_WIDTH = 64\n\n # ModelTrainer\n\n DATA_AUGMENTATION = True\n MAX_ITER_NUM = 200\n MIN_LOSS_DEC = 1e-4\n MAX_NO_IMPROVEMENT_NUM = 5\n MAX_BATCH_SIZE = 128\n LIMIT_MEMORY = False\n SEARCH_MAX_ITER = 200\n\n # text preprocessor\n\n EMBEDDING_DIM = 100\n MAX_SEQUENCE_LENGTH 
= 400\n MAX_NB_WORDS = 5000\n EXTRACT_PATH = \"glove/\"\n # Download file name\n FILE_PATH = \"glove.zip\"\n PRE_TRAIN_FILE_LINK = \"http://nlp.stanford.edu/data/glove.6B.zip\"\n PRE_TRAIN_FILE_NAME = \"glove.6B.100d.txt\"\n", "path": "autokeras/constant.py"}, {"content": "from autokeras.constant import Constant\nfrom autokeras.nn.graph import Graph\nfrom autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \\\n StubReLU, StubGlobalPooling\nfrom abc import abstractmethod\n\n\nclass NetworkGenerator:\n def __init__(self, n_output_node, input_shape):\n self.n_output_node = n_output_node\n self.input_shape = input_shape\n\n @abstractmethod\n def generate(self, model_len, model_width):\n pass\n\n\nclass CnnGenerator(NetworkGenerator):\n def __init__(self, n_output_node, input_shape):\n super(CnnGenerator, self).__init__(n_output_node, input_shape)\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n raise ValueError('The input dimension is too low.')\n\n def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):\n pooling_len = int(model_len / 4)\n graph = Graph(self.input_shape, False)\n temp_input_channel = self.input_shape[-1]\n output_node_id = 0\n for i in range(model_len):\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n output_node_id = graph.add_layer(StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)\n output_node_id = graph.add_layer(StubBatchNormalization(model_width), output_node_id)\n temp_input_channel = model_width\n if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):\n output_node_id = graph.add_layer(StubPooling(), output_node_id)\n\n output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)\n output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)\n output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),\n output_node_id)\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n return graph\n\n\nclass MlpGenerator(NetworkGenerator):\n def __init__(self, n_output_node, input_shape):\n super(MlpGenerator, self).__init__(n_output_node, input_shape)\n if len(self.input_shape) > 1:\n raise ValueError('The input dimension is too high.')\n\n def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):\n if type(model_width) is list and not len(model_width) == model_len:\n raise ValueError('The length of \\'model_width\\' does not match \\'model_len\\'')\n elif type(model_width) is int:\n model_width = [model_width] * model_len\n\n graph = Graph(self.input_shape[0], False)\n output_node_id = 0\n n_nodes_prev_layer = self.input_shape[0]\n for width in model_width:\n output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)\n output_node_id = graph.add_layer(StubDropout(Constant.MLP_DROPOUT_RATE), output_node_id)\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n n_nodes_prev_layer = width\n\n graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)\n return graph\n", "path": "autokeras/nn/generator.py"}]}
| 1,322 | 766 |
gh_patches_debug_20247
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-1782
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to pull image "gcr.io/kfserving/kfserving-controller:v0.5.1"
/kind bug
**What steps did you take and what happened:**
Attempted to deploy kfserving from kustomize of KF 1.3.0
Failed to pull image "gcr.io/kfserving/kfserving-controller:v0.5.1": rpc error: code = Unknown desc = Error response from daemon: Head https://gcr.io/v2/kfserving/kfserving-controller/manifests/v0.5.1: denied: Project kfserving has been deleted.
**What did you expect to happen:**
expected the images to be present at the configured location
**Anything else you would like to add:**
This appears to be a relatively recent problem but I'm not entirely sure of that.
I also attempted to pull v0.6.0 and that also failed.
**Environment:**
- Istio Version: v1.9.0
- Knative Version: ???
- KFServing Version: v1.5.1
- Kubeflow version: v1.3.0
- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Kubernetes version: Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.1", GitCommit:"5e58841cce77d4bc13713ad2b91fa0d961e69192", GitTreeState:"clean", BuildDate:"2021-05-12T14:18:45Z", GoVersion:"go1.16.4", Compiler:"gc", Platform:"darwin/amd64"}
Server Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.3", GitCommit:"ca643a4d1f7bfe34773c74f79527be4afd95bf39", GitTreeState:"clean", BuildDate:"2021-07-15T20:59:07Z", GoVersion:"go1.16.6", Compiler:"gc", Platform:"linux/amd64"}
- OS (e.g. from `/etc/os-release`):
Ubuntu 20.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/custom_model/model_remote.py`
Content:
```
1 import kfserving
2 from torchvision import models, transforms
3 from typing import Dict
4 import torch
5 from PIL import Image
6 import base64
7 import io
8 from ray import serve
9
10
11 # the model handle name should match the model endpoint name
12 @serve.deployment(name="custom-model", config={"num_replicas": 2})
13 class AlexNetModel(kfserving.KFModel):
14 def __init__(self):
15 self.name = "custom-model"
16 super().__init__(self.name)
17 self.load()
18
19 def load(self):
20 model = models.alexnet(pretrained=True)
21 model.eval()
22 self.model = model
23 self.ready = True
24
25 async def predict(self, request: Dict) -> Dict:
26 inputs = request["instances"]
27
28 # Input follows the Tensorflow V1 HTTP API for binary values
29 # https://www.tensorflow.org/tfx/serving/api_rest#encoding_binary_values
30 data = inputs[0]["image"]["b64"]
31
32 raw_img_data = base64.b64decode(data)
33 input_image = Image.open(io.BytesIO(raw_img_data))
34
35 preprocess = transforms.Compose([
36 transforms.Resize(256),
37 transforms.CenterCrop(224),
38 transforms.ToTensor(),
39 transforms.Normalize(mean=[0.485, 0.456, 0.406],
40 std=[0.229, 0.224, 0.225]),
41 ])
42
43 input_tensor = preprocess(input_image)
44 input_batch = input_tensor.unsqueeze(0)
45
46 output = self.model(input_batch)
47
48 torch.nn.functional.softmax(output, dim=1)[0]
49
50 values, top_5 = torch.topk(output, 5)
51
52 return {"predictions": values.tolist()}
53
54
55 if __name__ == "__main__":
56 kfserving.KFServer(workers=1).start({"custom-model": AlexNetModel})
57
```
Path: `python/kfserving/kfserving/kfserver.py`
Content:
```
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import logging
17 import json
18 import inspect
19 import sys
20 from typing import List, Optional, Dict, Union
21 import tornado.ioloop
22 import tornado.web
23 import tornado.httpserver
24 import tornado.log
25 import asyncio
26 from tornado import concurrent
27 from .utils import utils
28
29 from kfserving.handlers.http import PredictHandler, ExplainHandler
30 from kfserving import KFModel
31 from kfserving.kfmodel_repository import KFModelRepository
32 from ray.serve.api import Deployment, RayServeHandle
33 from ray import serve
34
35 DEFAULT_HTTP_PORT = 8080
36 DEFAULT_GRPC_PORT = 8081
37 DEFAULT_MAX_BUFFER_SIZE = 104857600
38
39 parser = argparse.ArgumentParser(add_help=False)
40 parser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,
41 help='The HTTP Port listened to by the model server.')
42 parser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,
43 help='The GRPC Port listened to by the model server.')
44 parser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,
45 help='The max buffer size for tornado.')
46 parser.add_argument('--workers', default=1, type=int,
47 help='The number of works to fork')
48 parser.add_argument('--max_asyncio_workers', default=None, type=int,
49 help='Max number of asyncio workers to spawn')
50 args, _ = parser.parse_known_args()
51
52 tornado.log.enable_pretty_logging()
53
54
55 class KFServer:
56 def __init__(self, http_port: int = args.http_port,
57 grpc_port: int = args.grpc_port,
58 max_buffer_size: int = args.max_buffer_size,
59 workers: int = args.workers,
60 max_asyncio_workers: int = args.max_asyncio_workers,
61 registered_models: KFModelRepository = KFModelRepository()):
62 self.registered_models = registered_models
63 self.http_port = http_port
64 self.grpc_port = grpc_port
65 self.max_buffer_size = max_buffer_size
66 self.workers = workers
67 self.max_asyncio_workers = max_asyncio_workers
68 self._http_server: Optional[tornado.httpserver.HTTPServer] = None
69
70 def create_application(self):
71 return tornado.web.Application([
72 # Server Liveness API returns 200 if server is alive.
73 (r"/", LivenessHandler),
74 (r"/v2/health/live", LivenessHandler),
75 (r"/v1/models",
76 ListHandler, dict(models=self.registered_models)),
77 (r"/v2/models",
78 ListHandler, dict(models=self.registered_models)),
79 # Model Health API returns 200 if model is ready to serve.
80 (r"/v1/models/([a-zA-Z0-9_-]+)",
81 HealthHandler, dict(models=self.registered_models)),
82 (r"/v2/models/([a-zA-Z0-9_-]+)/status",
83 HealthHandler, dict(models=self.registered_models)),
84 (r"/v1/models/([a-zA-Z0-9_-]+):predict",
85 PredictHandler, dict(models=self.registered_models)),
86 (r"/v2/models/([a-zA-Z0-9_-]+)/infer",
87 PredictHandler, dict(models=self.registered_models)),
88 (r"/v1/models/([a-zA-Z0-9_-]+):explain",
89 ExplainHandler, dict(models=self.registered_models)),
90 (r"/v2/models/([a-zA-Z0-9_-]+)/explain",
91 ExplainHandler, dict(models=self.registered_models)),
92 (r"/v2/repository/models/([a-zA-Z0-9_-]+)/load",
93 LoadHandler, dict(models=self.registered_models)),
94 (r"/v2/repository/models/([a-zA-Z0-9_-]+)/unload",
95 UnloadHandler, dict(models=self.registered_models)),
96 ])
97
98 def start(self, models: Union[List[KFModel], Dict[str, Deployment]], nest_asyncio: bool = False):
99 if isinstance(models, list):
100 for model in models:
101 if isinstance(model, KFModel):
102 self.register_model(model)
103 else:
104 raise RuntimeError("Model type should be KFModel")
105 elif isinstance(models, dict):
106 if all([issubclass(v, Deployment) for v in models.values()]):
107 serve.start(detached=True, http_host='0.0.0.0', http_port=9071)
108 for key in models:
109 models[key].deploy()
110 handle = models[key].get_handle()
111 self.register_model_handle(key, handle)
112 else:
113 raise RuntimeError("Model type should be RayServe Deployment")
114 else:
115 raise RuntimeError("Unknown model collection types")
116
117 if self.max_asyncio_workers is None:
118 # formula as suggest in https://bugs.python.org/issue35279
119 self.max_asyncio_workers = min(32, utils.cpu_count()+4)
120
121 logging.info(f"Setting asyncio max_workers as {self.max_asyncio_workers}")
122 asyncio.get_event_loop().set_default_executor(
123 concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))
124
125 self._http_server = tornado.httpserver.HTTPServer(
126 self.create_application(), max_buffer_size=self.max_buffer_size)
127
128 logging.info("Listening on port %s", self.http_port)
129 self._http_server.bind(self.http_port)
130 logging.info("Will fork %d workers", self.workers)
131 self._http_server.start(self.workers)
132
133 # Need to start the IOLoop after workers have been started
134 # https://github.com/tornadoweb/tornado/issues/2426
135 # The nest_asyncio package needs to be installed by the downstream module
136 if nest_asyncio:
137 import nest_asyncio
138 nest_asyncio.apply()
139
140 tornado.ioloop.IOLoop.current().start()
141
142 def register_model_handle(self, name: str, model_handle: RayServeHandle):
143 self.registered_models.update_handle(name, model_handle)
144 logging.info("Registering model handle: %s", name)
145
146 def register_model(self, model: KFModel):
147 if not model.name:
148 raise Exception(
149 "Failed to register model, model.name must be provided.")
150 self.registered_models.update(model)
151 logging.info("Registering model: %s", model.name)
152
153
154 class LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods
155 def get(self):
156 self.write("Alive")
157
158
159 class HealthHandler(tornado.web.RequestHandler):
160 def initialize(self, models: KFModelRepository):
161 self.models = models # pylint:disable=attribute-defined-outside-init
162
163 def get(self, name: str):
164 model = self.models.get_model(name)
165 if model is None:
166 raise tornado.web.HTTPError(
167 status_code=404,
168 reason="Model with name %s does not exist." % name
169 )
170
171 if not self.models.is_model_ready(name):
172 raise tornado.web.HTTPError(
173 status_code=503,
174 reason="Model with name %s is not ready." % name
175 )
176
177 self.write(json.dumps({
178 "name": model.name,
179 "ready": model.ready
180 }))
181
182
183 class ListHandler(tornado.web.RequestHandler):
184 def initialize(self, models: KFModelRepository):
185 self.models = models # pylint:disable=attribute-defined-outside-init
186
187 def get(self):
188 self.write(json.dumps([ob.name for ob in self.models.get_models()]))
189
190
191 class LoadHandler(tornado.web.RequestHandler):
192 def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init
193 self.models = models
194
195 async def post(self, name: str):
196 try:
197 if inspect.iscoroutinefunction(self.models.load):
198 await self.models.load(name)
199 else:
200 self.models.load(name)
201 except Exception:
202 ex_type, ex_value, ex_traceback = sys.exc_info()
203 raise tornado.web.HTTPError(
204 status_code=500,
205 reason=f"Model with name {name} is not ready. "
206 f"Error type: {ex_type} error msg: {ex_value}"
207 )
208
209 if not self.models.is_model_ready(name):
210 raise tornado.web.HTTPError(
211 status_code=503,
212 reason=f"Model with name {name} is not ready."
213 )
214 self.write(json.dumps({
215 "name": name,
216 "load": True
217 }))
218
219
220 class UnloadHandler(tornado.web.RequestHandler):
221 def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init
222 self.models = models
223
224 def post(self, name: str):
225 try:
226 self.models.unload(name)
227 except KeyError:
228 raise tornado.web.HTTPError(
229 status_code=404,
230 reason="Model with name %s does not exist." % name
231 )
232 self.write(json.dumps({
233 "name": name,
234 "unload": True
235 }))
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/custom_model/model_remote.py b/python/custom_model/model_remote.py
--- a/python/custom_model/model_remote.py
+++ b/python/custom_model/model_remote.py
@@ -9,7 +9,7 @@
# the model handle name should match the model endpoint name
[email protected](name="custom-model", config={"num_replicas": 2})
[email protected](name="custom-model", num_replicas=2)
class AlexNetModel(kfserving.KFModel):
def __init__(self):
self.name = "custom-model"
diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py
--- a/python/kfserving/kfserving/kfserver.py
+++ b/python/kfserving/kfserving/kfserver.py
@@ -103,7 +103,7 @@
else:
raise RuntimeError("Model type should be KFModel")
elif isinstance(models, dict):
- if all([issubclass(v, Deployment) for v in models.values()]):
+ if all([isinstance(v, Deployment) for v in models.values()]):
serve.start(detached=True, http_host='0.0.0.0', http_port=9071)
for key in models:
models[key].deploy()
|
{"golden_diff": "diff --git a/python/custom_model/model_remote.py b/python/custom_model/model_remote.py\n--- a/python/custom_model/model_remote.py\n+++ b/python/custom_model/model_remote.py\n@@ -9,7 +9,7 @@\n \n \n # the model handle name should match the model endpoint name\[email protected](name=\"custom-model\", config={\"num_replicas\": 2})\[email protected](name=\"custom-model\", num_replicas=2)\n class AlexNetModel(kfserving.KFModel):\n def __init__(self):\n self.name = \"custom-model\"\ndiff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py\n--- a/python/kfserving/kfserving/kfserver.py\n+++ b/python/kfserving/kfserving/kfserver.py\n@@ -103,7 +103,7 @@\n else:\n raise RuntimeError(\"Model type should be KFModel\")\n elif isinstance(models, dict):\n- if all([issubclass(v, Deployment) for v in models.values()]):\n+ if all([isinstance(v, Deployment) for v in models.values()]):\n serve.start(detached=True, http_host='0.0.0.0', http_port=9071)\n for key in models:\n models[key].deploy()\n", "issue": "Failed to pull image \"gcr.io/kfserving/kfserving-controller:v0.5.1\"\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n\r\nAttempted to deploy kfserving from kustomize of KF 1.3.0\r\n\r\nFailed to pull image \"gcr.io/kfserving/kfserving-controller:v0.5.1\": rpc error: code = Unknown desc = Error response from daemon: Head https://gcr.io/v2/kfserving/kfserving-controller/manifests/v0.5.1: denied: Project kfserving has been deleted.\r\n\r\n**What did you expect to happen:**\r\n\r\nexpected the images to be present at the configured location\r\n\r\n**Anything else you would like to add:**\r\n\r\nThis appears to be a relatively recent problem but I'm not entirely sure of that.\r\n\r\nI also attempted to pull v0.6.0 and that also failed.\r\n\r\n**Environment:**\r\n\r\n- Istio Version: v1.9.0\r\n- Knative Version: ???\r\n- KFServing Version: v1.5.1\r\n- Kubeflow version: v1.3.0\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Kubernetes version: Client Version: version.Info{Major:\"1\", Minor:\"21\", GitVersion:\"v1.21.1\", GitCommit:\"5e58841cce77d4bc13713ad2b91fa0d961e69192\", GitTreeState:\"clean\", BuildDate:\"2021-05-12T14:18:45Z\", GoVersion:\"go1.16.4\", Compiler:\"gc\", Platform:\"darwin/amd64\"}\r\nServer Version: version.Info{Major:\"1\", Minor:\"21\", GitVersion:\"v1.21.3\", GitCommit:\"ca643a4d1f7bfe34773c74f79527be4afd95bf39\", GitTreeState:\"clean\", BuildDate:\"2021-07-15T20:59:07Z\", GoVersion:\"go1.16.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\r\n- OS (e.g. 
from `/etc/os-release`):\r\nUbuntu 20.04\n", "before_files": [{"content": "import kfserving\nfrom torchvision import models, transforms\nfrom typing import Dict\nimport torch\nfrom PIL import Image\nimport base64\nimport io\nfrom ray import serve\n\n\n# the model handle name should match the model endpoint name\[email protected](name=\"custom-model\", config={\"num_replicas\": 2})\nclass AlexNetModel(kfserving.KFModel):\n def __init__(self):\n self.name = \"custom-model\"\n super().__init__(self.name)\n self.load()\n\n def load(self):\n model = models.alexnet(pretrained=True)\n model.eval()\n self.model = model\n self.ready = True\n\n async def predict(self, request: Dict) -> Dict:\n inputs = request[\"instances\"]\n\n # Input follows the Tensorflow V1 HTTP API for binary values\n # https://www.tensorflow.org/tfx/serving/api_rest#encoding_binary_values\n data = inputs[0][\"image\"][\"b64\"]\n\n raw_img_data = base64.b64decode(data)\n input_image = Image.open(io.BytesIO(raw_img_data))\n\n preprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n\n input_tensor = preprocess(input_image)\n input_batch = input_tensor.unsqueeze(0)\n\n output = self.model(input_batch)\n\n torch.nn.functional.softmax(output, dim=1)[0]\n\n values, top_5 = torch.topk(output, 5)\n\n return {\"predictions\": values.tolist()}\n\n\nif __name__ == \"__main__\":\n kfserving.KFServer(workers=1).start({\"custom-model\": AlexNetModel})\n", "path": "python/custom_model/model_remote.py"}, {"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nimport inspect\nimport sys\nfrom typing import List, Optional, Dict, Union\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nimport asyncio\nfrom tornado import concurrent\nfrom .utils import utils\n\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\nfrom kfserving.kfmodel_repository import KFModelRepository\nfrom ray.serve.api import Deployment, RayServeHandle\nfrom ray import serve\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\nDEFAULT_MAX_BUFFER_SIZE = 104857600\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,\n help='The max buffer size for tornado.')\nparser.add_argument('--workers', default=1, type=int,\n help='The number of works to fork')\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nargs, _ = 
parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\n\nclass KFServer:\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n max_buffer_size: int = args.max_buffer_size,\n workers: int = args.workers,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: KFModelRepository = KFModelRepository()):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.max_buffer_size = max_buffer_size\n self.workers = workers\n self.max_asyncio_workers = max_asyncio_workers\n self._http_server: Optional[tornado.httpserver.HTTPServer] = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v2/health/live\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n (r\"/v2/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/status\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/infer\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/explain\",\n ExplainHandler, dict(models=self.registered_models)),\n (r\"/v2/repository/models/([a-zA-Z0-9_-]+)/load\",\n LoadHandler, dict(models=self.registered_models)),\n (r\"/v2/repository/models/([a-zA-Z0-9_-]+)/unload\",\n UnloadHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: Union[List[KFModel], Dict[str, Deployment]], nest_asyncio: bool = False):\n if isinstance(models, list):\n for model in models:\n if isinstance(model, KFModel):\n self.register_model(model)\n else:\n raise RuntimeError(\"Model type should be KFModel\")\n elif isinstance(models, dict):\n if all([issubclass(v, Deployment) for v in models.values()]):\n serve.start(detached=True, http_host='0.0.0.0', http_port=9071)\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, utils.cpu_count()+4)\n\n logging.info(f\"Setting asyncio max_workers as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application(), max_buffer_size=self.max_buffer_size)\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n\n # Need to start the IOLoop after workers have been started\n # https://github.com/tornadoweb/tornado/issues/2426\n # The nest_asyncio package needs to be installed by the downstream module\n if nest_asyncio:\n import nest_asyncio\n nest_asyncio.apply()\n\n 
tornado.ioloop.IOLoop.current().start()\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logging.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n model = self.models.get_model(name)\n if model is None:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n if not self.models.is_model_ready(name):\n raise tornado.web.HTTPError(\n status_code=503,\n reason=\"Model with name %s is not ready.\" % name\n )\n\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps([ob.name for ob in self.models.get_models()]))\n\n\nclass LoadHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init\n self.models = models\n\n async def post(self, name: str):\n try:\n if inspect.iscoroutinefunction(self.models.load):\n await self.models.load(name)\n else:\n self.models.load(name)\n except Exception:\n ex_type, ex_value, ex_traceback = sys.exc_info()\n raise tornado.web.HTTPError(\n status_code=500,\n reason=f\"Model with name {name} is not ready. 
\"\n f\"Error type: {ex_type} error msg: {ex_value}\"\n )\n\n if not self.models.is_model_ready(name):\n raise tornado.web.HTTPError(\n status_code=503,\n reason=f\"Model with name {name} is not ready.\"\n )\n self.write(json.dumps({\n \"name\": name,\n \"load\": True\n }))\n\n\nclass UnloadHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init\n self.models = models\n\n def post(self, name: str):\n try:\n self.models.unload(name)\n except KeyError:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n self.write(json.dumps({\n \"name\": name,\n \"unload\": True\n }))\n", "path": "python/kfserving/kfserving/kfserver.py"}], "after_files": [{"content": "import kfserving\nfrom torchvision import models, transforms\nfrom typing import Dict\nimport torch\nfrom PIL import Image\nimport base64\nimport io\nfrom ray import serve\n\n\n# the model handle name should match the model endpoint name\[email protected](name=\"custom-model\", num_replicas=2)\nclass AlexNetModel(kfserving.KFModel):\n def __init__(self):\n self.name = \"custom-model\"\n super().__init__(self.name)\n self.load()\n\n def load(self):\n model = models.alexnet(pretrained=True)\n model.eval()\n self.model = model\n self.ready = True\n\n async def predict(self, request: Dict) -> Dict:\n inputs = request[\"instances\"]\n\n # Input follows the Tensorflow V1 HTTP API for binary values\n # https://www.tensorflow.org/tfx/serving/api_rest#encoding_binary_values\n data = inputs[0][\"image\"][\"b64\"]\n\n raw_img_data = base64.b64decode(data)\n input_image = Image.open(io.BytesIO(raw_img_data))\n\n preprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n\n input_tensor = preprocess(input_image)\n input_batch = input_tensor.unsqueeze(0)\n\n output = self.model(input_batch)\n\n torch.nn.functional.softmax(output, dim=1)[0]\n\n values, top_5 = torch.topk(output, 5)\n\n return {\"predictions\": values.tolist()}\n\n\nif __name__ == \"__main__\":\n kfserving.KFServer(workers=1).start({\"custom-model\": AlexNetModel})\n", "path": "python/custom_model/model_remote.py"}, {"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nimport inspect\nimport sys\nfrom typing import List, Optional, Dict, Union\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nimport asyncio\nfrom tornado import concurrent\nfrom .utils import utils\n\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\nfrom kfserving.kfmodel_repository import KFModelRepository\nfrom ray.serve.api import Deployment, RayServeHandle\nfrom ray import serve\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\nDEFAULT_MAX_BUFFER_SIZE = 
104857600\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,\n help='The max buffer size for tornado.')\nparser.add_argument('--workers', default=1, type=int,\n help='The number of works to fork')\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nargs, _ = parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\n\nclass KFServer:\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n max_buffer_size: int = args.max_buffer_size,\n workers: int = args.workers,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: KFModelRepository = KFModelRepository()):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.max_buffer_size = max_buffer_size\n self.workers = workers\n self.max_asyncio_workers = max_asyncio_workers\n self._http_server: Optional[tornado.httpserver.HTTPServer] = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v2/health/live\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n (r\"/v2/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/status\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/infer\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n (r\"/v2/models/([a-zA-Z0-9_-]+)/explain\",\n ExplainHandler, dict(models=self.registered_models)),\n (r\"/v2/repository/models/([a-zA-Z0-9_-]+)/load\",\n LoadHandler, dict(models=self.registered_models)),\n (r\"/v2/repository/models/([a-zA-Z0-9_-]+)/unload\",\n UnloadHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: Union[List[KFModel], Dict[str, Deployment]], nest_asyncio: bool = False):\n if isinstance(models, list):\n for model in models:\n if isinstance(model, KFModel):\n self.register_model(model)\n else:\n raise RuntimeError(\"Model type should be KFModel\")\n elif isinstance(models, dict):\n if all([isinstance(v, Deployment) for v in models.values()]):\n serve.start(detached=True, http_host='0.0.0.0', http_port=9071)\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, utils.cpu_count()+4)\n\n logging.info(f\"Setting asyncio max_workers as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n 
concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application(), max_buffer_size=self.max_buffer_size)\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n\n # Need to start the IOLoop after workers have been started\n # https://github.com/tornadoweb/tornado/issues/2426\n # The nest_asyncio package needs to be installed by the downstream module\n if nest_asyncio:\n import nest_asyncio\n nest_asyncio.apply()\n\n tornado.ioloop.IOLoop.current().start()\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logging.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n model = self.models.get_model(name)\n if model is None:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n if not self.models.is_model_ready(name):\n raise tornado.web.HTTPError(\n status_code=503,\n reason=\"Model with name %s is not ready.\" % name\n )\n\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps([ob.name for ob in self.models.get_models()]))\n\n\nclass LoadHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init\n self.models = models\n\n async def post(self, name: str):\n try:\n if inspect.iscoroutinefunction(self.models.load):\n await self.models.load(name)\n else:\n self.models.load(name)\n except Exception:\n ex_type, ex_value, ex_traceback = sys.exc_info()\n raise tornado.web.HTTPError(\n status_code=500,\n reason=f\"Model with name {name} is not ready. \"\n f\"Error type: {ex_type} error msg: {ex_value}\"\n )\n\n if not self.models.is_model_ready(name):\n raise tornado.web.HTTPError(\n status_code=503,\n reason=f\"Model with name {name} is not ready.\"\n )\n self.write(json.dumps({\n \"name\": name,\n \"load\": True\n }))\n\n\nclass UnloadHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository): # pylint:disable=attribute-defined-outside-init\n self.models = models\n\n def post(self, name: str):\n try:\n self.models.unload(name)\n except KeyError:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n self.write(json.dumps({\n \"name\": name,\n \"unload\": True\n }))\n", "path": "python/kfserving/kfserving/kfserver.py"}]}
| 4,006 | 287 |
gh_patches_debug_9892
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-2795
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
version requirements for cryptography should be consistent
Hi
It seems that version requirements for cryptography in setup.py and requirements.txt are not consistent
In setup.py, it is cryptography>=1.3.4
In requirements.txt, it is cryptography==3.2
Note that on PyPI, the version of cryptography is always updating (now 3.4.6). Inconsistent version requirements will result in installing different versions of cryptography if I use different ways of installation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import codecs
5 import os
6
7 from setuptools import find_packages
8 from setuptools import setup
9
10 ROOT_DIR = os.path.dirname(__file__)
11 SOURCE_DIR = os.path.join(ROOT_DIR)
12
13 requirements = [
14 'websocket-client >= 0.32.0',
15 'requests >= 2.14.2, != 2.18.0',
16 ]
17
18 extras_require = {
19 # win32 APIs if on Windows (required for npipe support)
20 ':sys_platform == "win32"': 'pywin32==227',
21
22 # If using docker-py over TLS, highly recommend this option is
23 # pip-installed or pinned.
24
25 # TODO: if pip installing both "requests" and "requests[security]", the
26 # extra package from the "security" option are not installed (see
27 # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
28 # installing the extra dependencies, install the following instead:
29 # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
30 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
31
32 # Only required when connecting using the ssh:// protocol
33 'ssh': ['paramiko>=2.4.2'],
34
35 }
36
37 version = None
38 exec(open('docker/version.py').read())
39
40 with open('./test-requirements.txt') as test_reqs_txt:
41 test_requirements = [line for line in test_reqs_txt]
42
43
44 long_description = ''
45 with codecs.open('./README.md', encoding='utf-8') as readme_md:
46 long_description = readme_md.read()
47
48 setup(
49 name="docker",
50 version=version,
51 description="A Python library for the Docker Engine API.",
52 long_description=long_description,
53 long_description_content_type='text/markdown',
54 url='https://github.com/docker/docker-py',
55 project_urls={
56 'Documentation': 'https://docker-py.readthedocs.io',
57 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501
58 'Source': 'https://github.com/docker/docker-py',
59 'Tracker': 'https://github.com/docker/docker-py/issues',
60 },
61 packages=find_packages(exclude=["tests.*", "tests"]),
62 install_requires=requirements,
63 tests_require=test_requirements,
64 extras_require=extras_require,
65 python_requires='>=3.6',
66 zip_safe=False,
67 test_suite='tests',
68 classifiers=[
69 'Development Status :: 5 - Production/Stable',
70 'Environment :: Other Environment',
71 'Intended Audience :: Developers',
72 'Operating System :: OS Independent',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 3',
75 'Programming Language :: Python :: 3.6',
76 'Programming Language :: Python :: 3.7',
77 'Programming Language :: Python :: 3.8',
78 'Programming Language :: Python :: 3.9',
79 'Topic :: Software Development',
80 'Topic :: Utilities',
81 'License :: OSI Approved :: Apache Software License',
82 ],
83 maintainer='Joffrey F',
84 maintainer_email='[email protected]',
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],
# Only required when connecting using the ssh:// protocol
'ssh': ['paramiko>=2.4.2'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],\n+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n \n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.2'],\n", "issue": "version requirements for cryptography should be consistent\nHi\r\nIt seems that version requirements for cryptography in setup.py and requirements.txt are not consistent\r\nIn setup.py, it is cryptography>=1.3.4\r\nIn requirements.txt, it is cryptography==3.2\r\nNote that in pypi, the version of cryptography is always updating( now 3.4.6). Inconsistent version requirements will result in installing different version for cryptography if I use different ways of installation. \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32==227',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). 
Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.2'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32==227',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). 
Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.2'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]}
| 1,260 | 205 |
gh_patches_debug_3267
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-4711
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.9 support
- CI
- cupy-release-tools
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import glob
4 import os
5 from setuptools import setup, find_packages
6 import sys
7
8 import cupy_setup_build
9
10
11 for submodule in ('cupy/core/include/cupy/cub/',
12 'cupy/core/include/cupy/jitify'):
13 if len(os.listdir(submodule)) == 0:
14 msg = '''
15 The folder %s is a git submodule but is
16 currently empty. Please use the command
17
18 git submodule update --init
19
20 to populate the folder before building from source.
21 ''' % submodule
22 print(msg, file=sys.stderr)
23 sys.exit(1)
24
25
26 requirements = {
27 'setup': [
28 'fastrlock>=0.5',
29 ],
30 'install': [
31 'numpy>=1.17',
32 'fastrlock>=0.5',
33 ],
34 'all': [
35 'scipy>=1.4',
36 'optuna>=2.0',
37 ],
38
39 'stylecheck': [
40 'autopep8==1.4.4',
41 'flake8==3.7.9',
42 'pbr==4.0.4',
43 'pycodestyle==2.5.0',
44 ],
45 'test': [
46 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.
47 'pytest>=6.2',
48 ],
49 'appveyor': [
50 '-r test',
51 ],
52 'jenkins': [
53 '-r test',
54 'pytest-timeout',
55 'pytest-cov',
56 'coveralls',
57 'codecov',
58 'coverage<5', # Otherwise, Python must be built with sqlite
59 ],
60 }
61
62
63 def reduce_requirements(key):
64 # Resolve recursive requirements notation (-r)
65 reqs = requirements[key]
66 resolved_reqs = []
67 for req in reqs:
68 if req.startswith('-r'):
69 depend_key = req[2:].lstrip()
70 reduce_requirements(depend_key)
71 resolved_reqs += requirements[depend_key]
72 else:
73 resolved_reqs.append(req)
74 requirements[key] = resolved_reqs
75
76
77 for k in requirements.keys():
78 reduce_requirements(k)
79
80
81 extras_require = {k: v for k, v in requirements.items() if k != 'install'}
82
83
84 setup_requires = requirements['setup']
85 install_requires = requirements['install']
86 tests_require = requirements['test']
87
88 # List of files that needs to be in the distribution (sdist/wheel).
89 # Notes:
90 # - Files only needed in sdist should be added to `MANIFEST.in`.
91 # - The following glob (`**`) ignores items starting with `.`.
92 cupy_package_data = [
93 'cupy/cuda/cupy_thrust.cu',
94 'cupy/cuda/cupy_cub.cu',
95 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback
96 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback
97 'cupy/cuda/cupy_cufft.h', # for cuFFT callback
98 'cupy/cuda/cufft.pxd', # for cuFFT callback
99 'cupy/cuda/cufft.pyx', # for cuFFT callback
100 'cupy/random/cupy_distributions.cu',
101 'cupy/random/cupy_distributions.cuh',
102 ] + [
103 x for x in glob.glob('cupy/core/include/cupy/**', recursive=True)
104 if os.path.isfile(x)
105 ]
106
107 package_data = {
108 'cupy': [
109 os.path.relpath(x, 'cupy') for x in cupy_package_data
110 ],
111 }
112
113 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs()
114
115 package_name = cupy_setup_build.get_package_name()
116 long_description = cupy_setup_build.get_long_description()
117 ext_modules = cupy_setup_build.get_ext_modules()
118 build_ext = cupy_setup_build.custom_build_ext
119 sdist = cupy_setup_build.sdist_with_cython
120
121 here = os.path.abspath(os.path.dirname(__file__))
122 # Get __version__ variable
123 with open(os.path.join(here, 'cupy', '_version.py')) as f:
124 exec(f.read())
125
126 CLASSIFIERS = """\
127 Development Status :: 5 - Production/Stable
128 Intended Audience :: Science/Research
129 Intended Audience :: Developers
130 License :: OSI Approved :: MIT License
131 Programming Language :: Python
132 Programming Language :: Python :: 3
133 Programming Language :: Python :: 3.6
134 Programming Language :: Python :: 3.7
135 Programming Language :: Python :: 3.8
136 Programming Language :: Python :: 3 :: Only
137 Programming Language :: Cython
138 Topic :: Software Development
139 Topic :: Scientific/Engineering
140 Operating System :: POSIX
141 Operating System :: Microsoft :: Windows
142 """
143
144
145 setup(
146 name=package_name,
147 version=__version__, # NOQA
148 description='CuPy: A NumPy-compatible array library accelerated by CUDA',
149 long_description=long_description,
150 author='Seiya Tokui',
151 author_email='[email protected]',
152 url='https://cupy.dev/',
153 license='MIT License',
154 project_urls={
155 "Bug Tracker": "https://github.com/cupy/cupy/issues",
156 "Documentation": "https://docs.cupy.dev/",
157 "Source Code": "https://github.com/cupy/cupy",
158 },
159 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
160 packages=find_packages(exclude=['install', 'tests']),
161 package_data=package_data,
162 zip_safe=False,
163 python_requires='>=3.6.0',
164 setup_requires=setup_requires,
165 install_requires=install_requires,
166 tests_require=tests_require,
167 extras_require=extras_require,
168 ext_modules=ext_modules,
169 cmdclass={'build_ext': build_ext,
170 'sdist': sdist},
171 )
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -133,6 +133,7 @@
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Programming Language :: Cython
Topic :: Software Development
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -133,6 +133,7 @@\n Programming Language :: Python :: 3.6\n Programming Language :: Python :: 3.7\n Programming Language :: Python :: 3.8\n+Programming Language :: Python :: 3.9\n Programming Language :: Python :: 3 :: Only\n Programming Language :: Cython\n Topic :: Software Development\n", "issue": "Python 3.9 support\n- CI\r\n- cupy-release-tools\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/core/include/cupy/cub/',\n 'cupy/core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.5',\n ],\n 'install': [\n 'numpy>=1.17',\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4',\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.4.4',\n 'flake8==3.7.9',\n 'pbr==4.0.4',\n 'pycodestyle==2.5.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience 
:: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/core/include/cupy/cub/',\n 'cupy/core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.5',\n ],\n 'install': [\n 'numpy>=1.17',\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4',\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.4.4',\n 'flake8==3.7.9',\n 'pbr==4.0.4',\n 'pycodestyle==2.5.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 
'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}]}
| 1,944 | 96 |
gh_patches_debug_7564
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Sentry]: 'EventInvoice' object has no attribute 'stripe_user_id'
https://sentry.eventyay.com/eventyay/dev-api/issues/59/
```
AttributeError: 'EventInvoice' object has no attribute 'stripe_user_id'
File "flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "flask_admin/base.py", line 69, in inner
return self._run_view(f, *args, **kwargs)
File "flask_admin/base.py", line 368, in _run_view
return fn(self, *args, **kwargs)
File "flask_admin/model/base.py", line 2076, in edit_view
return_url=return_url)
File "flask_admin/base.py", line 308, in render
return render_template(template, **kwargs)
File "flask/templating.py", line 134, in render_template
context, ctx.app)
File "flask/templating.py", line 116, in _render
rv = template.render(context)
File "jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html", line 3, in top-level template code
{% from 'admin/lib.html' import extra with context %} {# backward compatible #}
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/master.html", line 1, in top-level template code
{% extends admin_base_template %}
File "/app/app/templates/admin_base.html", line 1, in top-level template code
{% extends 'admin/base.html' %}
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/base.html", line 37, in top-level template code
{% block page_body %}
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/base.html", line 77, in block "page_body"
{% block body %}{% endblock %}
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html", line 32, in block "body"
{% block edit_form %}
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html", line 33, in block "edit_form"
{{ lib.render_form(form, return_url, extra(), form_opts) }}
File "jinja2/runtime.py", line 553, in _invoke
rv = self._func(*arguments)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html", line 202, in template
{% call form_tag(action=action) %}
File "jinja2/runtime.py", line 553, in _invoke
rv = self._func(*arguments)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html", line 182, in template
{{ caller() }}
File "jinja2/runtime.py", line 553, in _invoke
rv = self._func(*arguments)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html", line 203, in template
{{ render_form_fields(form, form_opts=form_opts) }}
File "jinja2/runtime.py", line 553, in _invoke
rv = self._func(*arguments)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html", line 175, in template
{{ render_field(form, f, kwargs) }}
File "jinja2/runtime.py", line 553, in _invoke
rv = self._func(*arguments)
File "/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html", line 130, in template
{{ field(**kwargs)|safe }}
File "wtforms/fields/core.py", line 153, in __call__
return self.meta.render_field(self, kwargs)
File "wtforms/meta.py", line 56, in render_field
return field.widget(field, **render_kw)
File "flask_admin/form/widgets.py", line 28, in __call__
return super(Select2Widget, self).__call__(field, **kwargs)
File "wtforms/widgets/core.py", line 288, in __call__
html.append(self.render_option(val, label, selected))
File "wtforms/widgets/core.py", line 301, in render_option
return HTMLString('<option %s>%s</option>' % (html_params(**options), escape(text_type(label), quote=False)))
File "app/models/event_invoice.py", line 111, in __unicode__
return self.stripe_user_id
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/models/event_invoice.py`
Content:
```
1 import time
2 import uuid
3 from datetime import datetime
4
5 from app.api.helpers.db import get_count
6 from app.models import db
7
8
9 def get_new_identifier():
10 identifier = str(uuid.uuid4())
11 count = get_count(EventInvoice.query.filter_by(identifier=identifier))
12 if count == 0:
13 return identifier
14 else:
15 return get_new_identifier()
16
17
18 class EventInvoice(db.Model):
19 """
20 Stripe authorization information for an event.
21 """
22 __tablename__ = 'event_invoices'
23
24 id = db.Column(db.Integer, primary_key=True)
25 identifier = db.Column(db.String, unique=True)
26 amount = db.Column(db.Float)
27 address = db.Column(db.String)
28 city = db.Column(db.String)
29 state = db.Column(db.String)
30 country = db.Column(db.String)
31 zipcode = db.Column(db.String)
32
33 user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='SET NULL'))
34 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))
35
36 created_at = db.Column(db.DateTime(timezone=True))
37 completed_at = db.Column(db.DateTime(timezone=True), nullable=True, default=None)
38 transaction_id = db.Column(db.String)
39 paid_via = db.Column(db.String)
40 payment_mode = db.Column(db.String)
41 brand = db.Column(db.String)
42 exp_month = db.Column(db.Integer)
43 exp_year = db.Column(db.Integer)
44 last4 = db.Column(db.String)
45 stripe_token = db.Column(db.String)
46 paypal_token = db.Column(db.String)
47 status = db.Column(db.String)
48 invoice_pdf_url = db.Column(db.String)
49
50 event = db.relationship('Event', backref='invoices')
51 user = db.relationship('User', backref='invoices')
52
53 discount_code_id = db.Column(db.Integer, db.ForeignKey('discount_codes.id', ondelete='SET NULL'),
54 nullable=True, default=None)
55 discount_code = db.relationship('DiscountCode', backref='event_invoices')
56
57 def __init__(self,
58 amount=None,
59 address=None,
60 city=None,
61 state=None,
62 country=None,
63 zipcode=None,
64 transaction_id=None,
65 paid_via=None,
66 user_id=None,
67 discount_code_id=None,
68 event_id=None,
69 invoice_pdf_url=None,
70 payment_mode=None,
71 brand=None,
72 exp_month=None,
73 exp_year=None,
74 last4=None,
75 stripe_token=None,
76 paypal_token=None
77 ):
78 self.identifier = get_new_identifier()
79 self.amount = amount
80 self.address = address
81 self.state = state
82 self.country = country
83 self.zipcode = zipcode
84 self.city = city
85 self.user_id = user_id
86 self.event_id = event_id
87 self.transaction_id = transaction_id
88 self.paid_via = paid_via
89 self.created_at = datetime.utcnow()
90 self.discount_code_id = discount_code_id
91 self.status = 'pending'
92 self.invoice_pdf_url = invoice_pdf_url
93 self.payment_mode = payment_mode
94 self.brand = brand
95 self.exp_month = exp_month
96 self.exp_year = exp_year
97 self.last4 = last4
98 self.stripe_token = stripe_token
99 self.paypal_token = paypal_token
100
101 def get_invoice_number(self):
102 return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)
103
104 def __repr__(self):
105 return '<EventInvoice %r>' % self.stripe_user_id
106
107 def __str__(self):
108 return unicode(self).encode('utf-8')
109
110 def __unicode__(self):
111 return self.stripe_user_id
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/models/event_invoice.py b/app/models/event_invoice.py
--- a/app/models/event_invoice.py
+++ b/app/models/event_invoice.py
@@ -102,10 +102,10 @@
return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)
def __repr__(self):
- return '<EventInvoice %r>' % self.stripe_user_id
+ return '<EventInvoice %r>' % self.invoice_pdf_url
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
- return self.stripe_user_id
+ return self.invoice_pdf_url
|
{"golden_diff": "diff --git a/app/models/event_invoice.py b/app/models/event_invoice.py\n--- a/app/models/event_invoice.py\n+++ b/app/models/event_invoice.py\n@@ -102,10 +102,10 @@\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\n \n def __repr__(self):\n- return '<EventInvoice %r>' % self.stripe_user_id\n+ return '<EventInvoice %r>' % self.invoice_pdf_url\n \n def __str__(self):\n return unicode(self).encode('utf-8')\n \n def __unicode__(self):\n- return self.stripe_user_id\n+ return self.invoice_pdf_url\n", "issue": "[Sentry]: 'EventInvoice' object has no attribute 'stripe_user_id'\nhttps://sentry.eventyay.com/eventyay/dev-api/issues/59/\r\n\r\n```\r\nAttributeError: 'EventInvoice' object has no attribute 'stripe_user_id'\r\n File \"flask/app.py\", line 1982, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"flask/app.py\", line 1614, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"flask_cors/extension.py\", line 161, in wrapped_function\r\n return cors_after_request(app.make_response(f(*args, **kwargs)))\r\n File \"flask/app.py\", line 1517, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"flask/app.py\", line 1612, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"flask/app.py\", line 1598, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"flask_admin/base.py\", line 69, in inner\r\n return self._run_view(f, *args, **kwargs)\r\n File \"flask_admin/base.py\", line 368, in _run_view\r\n return fn(self, *args, **kwargs)\r\n File \"flask_admin/model/base.py\", line 2076, in edit_view\r\n return_url=return_url)\r\n File \"flask_admin/base.py\", line 308, in render\r\n return render_template(template, **kwargs)\r\n File \"flask/templating.py\", line 134, in render_template\r\n context, ctx.app)\r\n File \"flask/templating.py\", line 116, in _render\r\n rv = template.render(context)\r\n File \"jinja2/environment.py\", line 1008, in render\r\n return self.environment.handle_exception(exc_info, True)\r\n File \"jinja2/environment.py\", line 780, in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html\", line 3, in top-level template code\r\n {% from 'admin/lib.html' import extra with context %} {# backward compatible #}\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/master.html\", line 1, in top-level template code\r\n {% extends admin_base_template %}\r\n File \"/app/app/templates/admin_base.html\", line 1, in top-level template code\r\n {% extends 'admin/base.html' %}\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/base.html\", line 37, in top-level template code\r\n {% block page_body %}\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/base.html\", line 77, in block \"page_body\"\r\n {% block body %}{% endblock %}\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html\", line 32, in block \"body\"\r\n {% block edit_form %}\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/model/edit.html\", line 33, in block \"edit_form\"\r\n {{ lib.render_form(form, return_url, extra(), form_opts) }}\r\n File \"jinja2/runtime.py\", line 553, in _invoke\r\n rv = 
self._func(*arguments)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html\", line 202, in template\r\n {% call form_tag(action=action) %}\r\n File \"jinja2/runtime.py\", line 553, in _invoke\r\n rv = self._func(*arguments)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html\", line 182, in template\r\n {{ caller() }}\r\n File \"jinja2/runtime.py\", line 553, in _invoke\r\n rv = self._func(*arguments)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html\", line 203, in template\r\n {{ render_form_fields(form, form_opts=form_opts) }}\r\n File \"jinja2/runtime.py\", line 553, in _invoke\r\n rv = self._func(*arguments)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html\", line 175, in template\r\n {{ render_field(form, f, kwargs) }}\r\n File \"jinja2/runtime.py\", line 553, in _invoke\r\n rv = self._func(*arguments)\r\n File \"/app/.heroku/python/lib/python2.7/site-packages/flask_admin/templates/bootstrap3/admin/lib.html\", line 130, in template\r\n {{ field(**kwargs)|safe }}\r\n File \"wtforms/fields/core.py\", line 153, in __call__\r\n return self.meta.render_field(self, kwargs)\r\n File \"wtforms/meta.py\", line 56, in render_field\r\n return field.widget(field, **render_kw)\r\n File \"flask_admin/form/widgets.py\", line 28, in __call__\r\n return super(Select2Widget, self).__call__(field, **kwargs)\r\n File \"wtforms/widgets/core.py\", line 288, in __call__\r\n html.append(self.render_option(val, label, selected))\r\n File \"wtforms/widgets/core.py\", line 301, in render_option\r\n return HTMLString('<option %s>%s</option>' % (html_params(**options), escape(text_type(label), quote=False)))\r\n File \"app/models/event_invoice.py\", line 111, in __unicode__\r\n return self.stripe_user_id\r\n```\n", "before_files": [{"content": "import time\nimport uuid\nfrom datetime import datetime\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\n\n\ndef get_new_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(EventInvoice.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return get_new_identifier()\n\n\nclass EventInvoice(db.Model):\n \"\"\"\n Stripe authorization information for an event.\n \"\"\"\n __tablename__ = 'event_invoices'\n\n id = db.Column(db.Integer, primary_key=True)\n identifier = db.Column(db.String, unique=True)\n amount = db.Column(db.Float)\n address = db.Column(db.String)\n city = db.Column(db.String)\n state = db.Column(db.String)\n country = db.Column(db.String)\n zipcode = db.Column(db.String)\n\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='SET NULL'))\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))\n\n created_at = db.Column(db.DateTime(timezone=True))\n completed_at = db.Column(db.DateTime(timezone=True), nullable=True, default=None)\n transaction_id = db.Column(db.String)\n paid_via = db.Column(db.String)\n payment_mode = db.Column(db.String)\n brand = db.Column(db.String)\n exp_month = db.Column(db.Integer)\n exp_year = db.Column(db.Integer)\n last4 = db.Column(db.String)\n stripe_token = db.Column(db.String)\n paypal_token = db.Column(db.String)\n status = db.Column(db.String)\n invoice_pdf_url = db.Column(db.String)\n\n event = db.relationship('Event', backref='invoices')\n user = db.relationship('User', 
backref='invoices')\n\n discount_code_id = db.Column(db.Integer, db.ForeignKey('discount_codes.id', ondelete='SET NULL'),\n nullable=True, default=None)\n discount_code = db.relationship('DiscountCode', backref='event_invoices')\n\n def __init__(self,\n amount=None,\n address=None,\n city=None,\n state=None,\n country=None,\n zipcode=None,\n transaction_id=None,\n paid_via=None,\n user_id=None,\n discount_code_id=None,\n event_id=None,\n invoice_pdf_url=None,\n payment_mode=None,\n brand=None,\n exp_month=None,\n exp_year=None,\n last4=None,\n stripe_token=None,\n paypal_token=None\n ):\n self.identifier = get_new_identifier()\n self.amount = amount\n self.address = address\n self.state = state\n self.country = country\n self.zipcode = zipcode\n self.city = city\n self.user_id = user_id\n self.event_id = event_id\n self.transaction_id = transaction_id\n self.paid_via = paid_via\n self.created_at = datetime.utcnow()\n self.discount_code_id = discount_code_id\n self.status = 'pending'\n self.invoice_pdf_url = invoice_pdf_url\n self.payment_mode = payment_mode\n self.brand = brand\n self.exp_month = exp_month\n self.exp_year = exp_year\n self.last4 = last4\n self.stripe_token = stripe_token\n self.paypal_token = paypal_token\n\n def get_invoice_number(self):\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\n\n def __repr__(self):\n return '<EventInvoice %r>' % self.stripe_user_id\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\n def __unicode__(self):\n return self.stripe_user_id\n", "path": "app/models/event_invoice.py"}], "after_files": [{"content": "import time\nimport uuid\nfrom datetime import datetime\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\n\n\ndef get_new_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(EventInvoice.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return get_new_identifier()\n\n\nclass EventInvoice(db.Model):\n \"\"\"\n Stripe authorization information for an event.\n \"\"\"\n __tablename__ = 'event_invoices'\n\n id = db.Column(db.Integer, primary_key=True)\n identifier = db.Column(db.String, unique=True)\n amount = db.Column(db.Float)\n address = db.Column(db.String)\n city = db.Column(db.String)\n state = db.Column(db.String)\n country = db.Column(db.String)\n zipcode = db.Column(db.String)\n\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='SET NULL'))\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))\n\n created_at = db.Column(db.DateTime(timezone=True))\n completed_at = db.Column(db.DateTime(timezone=True), nullable=True, default=None)\n transaction_id = db.Column(db.String)\n paid_via = db.Column(db.String)\n payment_mode = db.Column(db.String)\n brand = db.Column(db.String)\n exp_month = db.Column(db.Integer)\n exp_year = db.Column(db.Integer)\n last4 = db.Column(db.String)\n stripe_token = db.Column(db.String)\n paypal_token = db.Column(db.String)\n status = db.Column(db.String)\n invoice_pdf_url = db.Column(db.String)\n\n event = db.relationship('Event', backref='invoices')\n user = db.relationship('User', backref='invoices')\n\n discount_code_id = db.Column(db.Integer, db.ForeignKey('discount_codes.id', ondelete='SET NULL'),\n nullable=True, default=None)\n discount_code = db.relationship('DiscountCode', backref='event_invoices')\n\n def __init__(self,\n amount=None,\n address=None,\n city=None,\n state=None,\n country=None,\n zipcode=None,\n transaction_id=None,\n 
paid_via=None,\n user_id=None,\n discount_code_id=None,\n event_id=None,\n invoice_pdf_url=None,\n payment_mode=None,\n brand=None,\n exp_month=None,\n exp_year=None,\n last4=None,\n stripe_token=None,\n paypal_token=None\n ):\n self.identifier = get_new_identifier()\n self.amount = amount\n self.address = address\n self.state = state\n self.country = country\n self.zipcode = zipcode\n self.city = city\n self.user_id = user_id\n self.event_id = event_id\n self.transaction_id = transaction_id\n self.paid_via = paid_via\n self.created_at = datetime.utcnow()\n self.discount_code_id = discount_code_id\n self.status = 'pending'\n self.invoice_pdf_url = invoice_pdf_url\n self.payment_mode = payment_mode\n self.brand = brand\n self.exp_month = exp_month\n self.exp_year = exp_year\n self.last4 = last4\n self.stripe_token = stripe_token\n self.paypal_token = paypal_token\n\n def get_invoice_number(self):\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\n\n def __repr__(self):\n return '<EventInvoice %r>' % self.invoice_pdf_url\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\n def __unicode__(self):\n return self.invoice_pdf_url\n", "path": "app/models/event_invoice.py"}]}
| 2,613 | 155 |
gh_patches_debug_16794
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-3622
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ius repo link is being decommissioned
Motivation
----------
The link currently used for the CentOS repo (https://centos7.iuscommunity.org/ius-release.rpm) is being decommissioned and replaced with a new link (https://repo.ius.io/ius-release-el7.rpm). Trying to use the old link will cause the Travis suites to error/fail.
Modification
------------
Change to the new link.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/client/ruleclient.py`
Content:
```
1 # Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Martin Barisits <[email protected]>, 2013-2018
17 # - Vincent Garonne <[email protected]>, 2013-2018
18 # - Cedric Serfon <[email protected]>, 2014-2015
19 # - Ralph Vigne <[email protected]>, 2015
20 # - Joaquin Bogado <[email protected]>, 2018
21 #
22 # PY3K COMPATIBLE
23
24 from json import dumps, loads
25 from requests.status_codes import codes
26
27 from rucio.client.baseclient import BaseClient
28 from rucio.client.baseclient import choice
29 from rucio.common.utils import build_url
30
31
32 class RuleClient(BaseClient):
33
34 """RuleClient class for working with replication rules"""
35
36 RULE_BASEURL = 'rules'
37
38 def __init__(self, rucio_host=None, auth_host=None, account=None, ca_cert=None, auth_type=None, creds=None, timeout=600, dq2_wrapper=False):
39 super(RuleClient, self).__init__(rucio_host, auth_host, account, ca_cert, auth_type, creds, timeout, dq2_wrapper)
40
41 def add_replication_rule(self, dids, copies, rse_expression, weight=None, lifetime=None, grouping='DATASET', account=None,
42 locked=False, source_replica_expression=None, activity=None, notify='N', purge_replicas=False,
43 ignore_availability=False, comment=None, ask_approval=False, asynchronous=False, priority=3,
44 meta=None):
45 """
46 :param dids: The data identifier set.
47 :param copies: The number of replicas.
48 :param rse_expression: Boolean string expression to give the list of RSEs.
49 :param weight: If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.
50 :param lifetime: The lifetime of the replication rules (in seconds).
51 :param grouping: ALL - All files will be replicated to the same RSE.
52 DATASET - All files in the same dataset will be replicated to the same RSE.
53 NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.
54 :param account: The account owning the rule.
55 :param locked: If the rule is locked, it cannot be deleted.
56 :param source_replica_expression: RSE Expression for RSEs to be considered for source replicas.
57 :param activity: Transfer Activity to be passed to FTS.
58 :param notify: Notification setting for the rule (Y, N, C).
59 :param purge_replicas: When the rule gets deleted purge the associated replicas immediately.
60 :param ignore_availability: Option to ignore the availability of RSEs.
61 :param ask_approval: Ask for approval of this replication rule.
62 :param asynchronous: Create rule asynchronously by judge-injector.
63 :param priority: Priority of the transfers.
64 :param comment: Comment about the rule.
65 :param meta: Metadata, as dictionary.
66 """
67 path = self.RULE_BASEURL + '/'
68 url = build_url(choice(self.list_hosts), path=path)
69 # TODO remove the subscription_id from the client; It will only be used by the core;
70 data = dumps({'dids': dids, 'copies': copies, 'rse_expression': rse_expression,
71 'weight': weight, 'lifetime': lifetime, 'grouping': grouping,
72 'account': account, 'locked': locked, 'source_replica_expression': source_replica_expression,
73 'activity': activity, 'notify': notify, 'purge_replicas': purge_replicas,
74 'ignore_availability': ignore_availability, 'comment': comment, 'ask_approval': ask_approval,
75 'asynchronous': asynchronous, 'priority': priority, 'meta': meta})
76 r = self._send_request(url, type='POST', data=data)
77 if r.status_code == codes.created:
78 return loads(r.text)
79 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
80 raise exc_cls(exc_msg)
81
82 def delete_replication_rule(self, rule_id, purge_replicas=None):
83 """
84 Deletes a replication rule and all associated locks.
85
86 :param rule_id: The id of the rule to be deleted
87 :param purge_replicas: Immediately delete the replicas.
88 :raises: RuleNotFound, AccessDenied
89 """
90
91 path = self.RULE_BASEURL + '/' + rule_id
92 url = build_url(choice(self.list_hosts), path=path)
93
94 data = dumps({'purge_replicas': purge_replicas})
95
96 r = self._send_request(url, type='DEL', data=data)
97
98 if r.status_code == codes.ok:
99 return True
100 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
101 raise exc_cls(exc_msg)
102
103 def get_replication_rule(self, rule_id, estimate_ttc=False):
104 """
105 Get a replication rule.
106
107 :param rule_id: The id of the rule to be retrieved.
108 :param estimate_ttc: bool, if rule_info should return ttc information
109 :raises: RuleNotFound
110 """
111 path = self.RULE_BASEURL + '/' + rule_id
112 url = build_url(choice(self.list_hosts), path=path)
113 data = dumps({'estimate_ttc': estimate_ttc})
114 r = self._send_request(url, type='GET', data=data)
115 if r.status_code == codes.ok:
116 return next(self._load_json_data(r))
117 else:
118 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
119 raise exc_cls(exc_msg)
120
121 def update_replication_rule(self, rule_id, options):
122 """
123 :param rule_id: The id of the rule to be retrieved.
124 :param options: Options dictionary.
125 :raises: RuleNotFound
126 """
127 path = self.RULE_BASEURL + '/' + rule_id
128 url = build_url(choice(self.list_hosts), path=path)
129 data = dumps({'options': options})
130 r = self._send_request(url, type='PUT', data=data)
131 if r.status_code == codes.ok:
132 return True
133 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
134 raise exc_cls(exc_msg)
135
136 def reduce_replication_rule(self, rule_id, copies, exclude_expression=None):
137 """
138 :param rule_id: Rule to be reduced.
139 :param copies: Number of copies of the new rule.
140 :param exclude_expression: RSE Expression of RSEs to exclude.
141 :raises: RuleReplaceFailed, RuleNotFound
142 """
143
144 path = self.RULE_BASEURL + '/' + rule_id + '/reduce'
145 url = build_url(choice(self.list_hosts), path=path)
146 data = dumps({'copies': copies, 'exclude_expression': exclude_expression})
147 r = self._send_request(url, type='POST', data=data)
148 if r.status_code == codes.ok:
149 return loads(r.text)
150 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
151 raise exc_cls(exc_msg)
152
153 def move_replication_rule(self, rule_id, rse_expression):
154 """
155 Move a replication rule to another RSE and, once done, delete the original one.
156
157 :param rule_id: Rule to be moved.
158 :param rse_expression: RSE expression of the new rule.
159 :raises: RuleNotFound, RuleReplaceFailed
160 """
161
162 path = self.RULE_BASEURL + '/' + rule_id + '/move'
163 url = build_url(choice(self.list_hosts), path=path)
164 data = dumps({'rule_id': rule_id, 'rse_expression': rse_expression})
165 r = self._send_request(url, type='POST', data=data)
166 if r.status_code == codes.created:
167 return loads(r.text)
168 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
169 raise exc_cls(exc_msg)
170
171 def approve_replication_rule(self, rule_id):
172 """
173 :param rule_id: Rule to be approved.
174 :raises: RuleNotFound
175 """
176
177 path = self.RULE_BASEURL + '/' + rule_id
178 url = build_url(choice(self.list_hosts), path=path)
179 data = dumps({'options': {'approve': True}})
180 r = self._send_request(url, type='PUT', data=data)
181 if r.status_code == codes.ok:
182 return True
183 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
184 raise exc_cls(exc_msg)
185
186 def deny_replication_rule(self, rule_id):
187 """
188 :param rule_id: Rule to be denied.
189 :raises: RuleNotFound
190 """
191
192 path = self.RULE_BASEURL + '/' + rule_id
193 url = build_url(choice(self.list_hosts), path=path)
194 data = dumps({'options': {'approve': False}})
195 r = self._send_request(url, type='PUT', data=data)
196 if r.status_code == codes.ok:
197 return True
198 exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
199 raise exc_cls(exc_msg)
200
201 def list_replication_rule_full_history(self, scope, name):
202 """
203 List the rule history of a DID.
204
205 :param scope: The scope of the DID.
206 :param name: The name of the DID.
207 """
208 path = self.RULE_BASEURL + '/' + scope + '/' + name + '/history'
209 url = build_url(choice(self.list_hosts), path=path)
210 r = self._send_request(url, type='GET')
211 if r.status_code == codes.ok:
212 return self._load_json_data(r)
213 exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)
214 raise exc_cls(exc_msg)
215
216 def examine_replication_rule(self, rule_id):
217 """
218 Examine a replication rule for errors during transfer.
219
220 :param rule_id: Rule to be denied.
221 :raises: RuleNotFound
222 """
223 path = self.RULE_BASEURL + '/' + rule_id + '/analysis'
224 url = build_url(choice(self.list_hosts), path=path)
225 r = self._send_request(url, type='GET')
226 if r.status_code == codes.ok:
227 return next(self._load_json_data(r))
228 exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)
229 raise exc_cls(exc_msg)
230
231 def list_replica_locks(self, rule_id):
232 """
233 List details of all replica locks for a rule.
234
235 :param rule_id: Rule to be denied.
236 :raises: RuleNotFound
237 """
238 path = self.RULE_BASEURL + '/' + rule_id + '/locks'
239 url = build_url(choice(self.list_hosts), path=path)
240 r = self._send_request(url, type='GET')
241 if r.status_code == codes.ok:
242 return self._load_json_data(r)
243 exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)
244 raise exc_cls(exc_msg)
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/rucio/client/ruleclient.py b/lib/rucio/client/ruleclient.py
--- a/lib/rucio/client/ruleclient.py
+++ b/lib/rucio/client/ruleclient.py
@@ -21,6 +21,11 @@
#
# PY3K COMPATIBLE
+try:
+ from urllib import quote_plus
+except ImportError:
+ from urllib.parse import quote_plus
+
from json import dumps, loads
from requests.status_codes import codes
@@ -205,7 +210,7 @@
:param scope: The scope of the DID.
:param name: The name of the DID.
"""
- path = self.RULE_BASEURL + '/' + scope + '/' + name + '/history'
+ path = '/'.join([self.RULE_BASEURL, quote_plus(scope), quote_plus(name), 'history'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type='GET')
if r.status_code == codes.ok:
|
{"golden_diff": "diff --git a/lib/rucio/client/ruleclient.py b/lib/rucio/client/ruleclient.py\n--- a/lib/rucio/client/ruleclient.py\n+++ b/lib/rucio/client/ruleclient.py\n@@ -21,6 +21,11 @@\n #\n # PY3K COMPATIBLE\n \n+try:\n+ from urllib import quote_plus\n+except ImportError:\n+ from urllib.parse import quote_plus\n+\n from json import dumps, loads\n from requests.status_codes import codes\n \n@@ -205,7 +210,7 @@\n :param scope: The scope of the DID.\n :param name: The name of the DID.\n \"\"\"\n- path = self.RULE_BASEURL + '/' + scope + '/' + name + '/history'\n+ path = '/'.join([self.RULE_BASEURL, quote_plus(scope), quote_plus(name), 'history'])\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n", "issue": "ius repo link is being decommissioned\nMotivation\r\n----------\r\nThe link currently used for the CentOS repo (https://centos7.iuscommunity.org/ius-release.rpm) is being decommissioned and replaced with a new link (https://repo.ius.io/ius-release-el7.rpm). Trying to use the old link will cause the Travis suites to error/fail.\r\n\r\nModification\r\n------------\r\nChange to the new link.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <[email protected]>, 2013-2018\n# - Vincent Garonne <[email protected]>, 2013-2018\n# - Cedric Serfon <[email protected]>, 2014-2015\n# - Ralph Vigne <[email protected]>, 2015\n# - Joaquin Bogado <[email protected]>, 2018\n#\n# PY3K COMPATIBLE\n\nfrom json import dumps, loads\nfrom requests.status_codes import codes\n\nfrom rucio.client.baseclient import BaseClient\nfrom rucio.client.baseclient import choice\nfrom rucio.common.utils import build_url\n\n\nclass RuleClient(BaseClient):\n\n \"\"\"RuleClient class for working with replication rules\"\"\"\n\n RULE_BASEURL = 'rules'\n\n def __init__(self, rucio_host=None, auth_host=None, account=None, ca_cert=None, auth_type=None, creds=None, timeout=600, dq2_wrapper=False):\n super(RuleClient, self).__init__(rucio_host, auth_host, account, ca_cert, auth_type, creds, timeout, dq2_wrapper)\n\n def add_replication_rule(self, dids, copies, rse_expression, weight=None, lifetime=None, grouping='DATASET', account=None,\n locked=False, source_replica_expression=None, activity=None, notify='N', purge_replicas=False,\n ignore_availability=False, comment=None, ask_approval=False, asynchronous=False, priority=3,\n meta=None):\n \"\"\"\n :param dids: The data identifier set.\n :param copies: The number of replicas.\n :param rse_expression: Boolean string expression to give the list of RSEs.\n :param weight: If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.\n :param lifetime: The lifetime of the replication rules (in seconds).\n :param grouping: ALL - All files will be replicated to the same RSE.\n DATASET - All files in the same dataset 
will be replicated to the same RSE.\n NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.\n :param account: The account owning the rule.\n :param locked: If the rule is locked, it cannot be deleted.\n :param source_replica_expression: RSE Expression for RSEs to be considered for source replicas.\n :param activity: Transfer Activity to be passed to FTS.\n :param notify: Notification setting for the rule (Y, N, C).\n :param purge_replicas: When the rule gets deleted purge the associated replicas immediately.\n :param ignore_availability: Option to ignore the availability of RSEs.\n :param ask_approval: Ask for approval of this replication rule.\n :param asynchronous: Create rule asynchronously by judge-injector.\n :param priority: Priority of the transfers.\n :param comment: Comment about the rule.\n :param meta: Metadata, as dictionary.\n \"\"\"\n path = self.RULE_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n # TODO remove the subscription_id from the client; It will only be used by the core;\n data = dumps({'dids': dids, 'copies': copies, 'rse_expression': rse_expression,\n 'weight': weight, 'lifetime': lifetime, 'grouping': grouping,\n 'account': account, 'locked': locked, 'source_replica_expression': source_replica_expression,\n 'activity': activity, 'notify': notify, 'purge_replicas': purge_replicas,\n 'ignore_availability': ignore_availability, 'comment': comment, 'ask_approval': ask_approval,\n 'asynchronous': asynchronous, 'priority': priority, 'meta': meta})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.created:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def delete_replication_rule(self, rule_id, purge_replicas=None):\n \"\"\"\n Deletes a replication rule and all associated locks.\n\n :param rule_id: The id of the rule to be deleted\n :param purge_replicas: Immediately delete the replicas.\n :raises: RuleNotFound, AccessDenied\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n\n data = dumps({'purge_replicas': purge_replicas})\n\n r = self._send_request(url, type='DEL', data=data)\n\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def get_replication_rule(self, rule_id, estimate_ttc=False):\n \"\"\"\n Get a replication rule.\n\n :param rule_id: The id of the rule to be retrieved.\n :param estimate_ttc: bool, if rule_info should return ttc information\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'estimate_ttc': estimate_ttc})\n r = self._send_request(url, type='GET', data=data)\n if r.status_code == codes.ok:\n return next(self._load_json_data(r))\n else:\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def update_replication_rule(self, rule_id, options):\n \"\"\"\n :param rule_id: The id of the rule to be retrieved.\n :param options: Options dictionary.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': options})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == 
codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def reduce_replication_rule(self, rule_id, copies, exclude_expression=None):\n \"\"\"\n :param rule_id: Rule to be reduced.\n :param copies: Number of copies of the new rule.\n :param exclude_expression: RSE Expression of RSEs to exclude.\n :raises: RuleReplaceFailed, RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id + '/reduce'\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'copies': copies, 'exclude_expression': exclude_expression})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.ok:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def move_replication_rule(self, rule_id, rse_expression):\n \"\"\"\n Move a replication rule to another RSE and, once done, delete the original one.\n\n :param rule_id: Rule to be moved.\n :param rse_expression: RSE expression of the new rule.\n :raises: RuleNotFound, RuleReplaceFailed\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id + '/move'\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'rule_id': rule_id, 'rse_expression': rse_expression})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.created:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def approve_replication_rule(self, rule_id):\n \"\"\"\n :param rule_id: Rule to be approved.\n :raises: RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': {'approve': True}})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def deny_replication_rule(self, rule_id):\n \"\"\"\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': {'approve': False}})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def list_replication_rule_full_history(self, scope, name):\n \"\"\"\n List the rule history of a DID.\n\n :param scope: The scope of the DID.\n :param name: The name of the DID.\n \"\"\"\n path = self.RULE_BASEURL + '/' + scope + '/' + name + '/history'\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return self._load_json_data(r)\n exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n\n def examine_replication_rule(self, rule_id):\n \"\"\"\n Examine a replication rule for errors during transfer.\n\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id + '/analysis'\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return next(self._load_json_data(r))\n exc_cls, exc_msg = 
self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n\n def list_replica_locks(self, rule_id):\n \"\"\"\n List details of all replica locks for a rule.\n\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id + '/locks'\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return self._load_json_data(r)\n exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n", "path": "lib/rucio/client/ruleclient.py"}], "after_files": [{"content": "# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <[email protected]>, 2013-2018\n# - Vincent Garonne <[email protected]>, 2013-2018\n# - Cedric Serfon <[email protected]>, 2014-2015\n# - Ralph Vigne <[email protected]>, 2015\n# - Joaquin Bogado <[email protected]>, 2018\n#\n# PY3K COMPATIBLE\n\ntry:\n from urllib import quote_plus\nexcept ImportError:\n from urllib.parse import quote_plus\n\nfrom json import dumps, loads\nfrom requests.status_codes import codes\n\nfrom rucio.client.baseclient import BaseClient\nfrom rucio.client.baseclient import choice\nfrom rucio.common.utils import build_url\n\n\nclass RuleClient(BaseClient):\n\n \"\"\"RuleClient class for working with replication rules\"\"\"\n\n RULE_BASEURL = 'rules'\n\n def __init__(self, rucio_host=None, auth_host=None, account=None, ca_cert=None, auth_type=None, creds=None, timeout=600, dq2_wrapper=False):\n super(RuleClient, self).__init__(rucio_host, auth_host, account, ca_cert, auth_type, creds, timeout, dq2_wrapper)\n\n def add_replication_rule(self, dids, copies, rse_expression, weight=None, lifetime=None, grouping='DATASET', account=None,\n locked=False, source_replica_expression=None, activity=None, notify='N', purge_replicas=False,\n ignore_availability=False, comment=None, ask_approval=False, asynchronous=False, priority=3,\n meta=None):\n \"\"\"\n :param dids: The data identifier set.\n :param copies: The number of replicas.\n :param rse_expression: Boolean string expression to give the list of RSEs.\n :param weight: If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.\n :param lifetime: The lifetime of the replication rules (in seconds).\n :param grouping: ALL - All files will be replicated to the same RSE.\n DATASET - All files in the same dataset will be replicated to the same RSE.\n NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.\n :param account: The account owning the rule.\n :param locked: If the rule is locked, it cannot be deleted.\n :param source_replica_expression: RSE Expression for RSEs to be considered for source replicas.\n :param activity: Transfer Activity to be passed to FTS.\n :param notify: Notification setting for the rule (Y, N, C).\n :param purge_replicas: When the rule 
gets deleted purge the associated replicas immediately.\n :param ignore_availability: Option to ignore the availability of RSEs.\n :param ask_approval: Ask for approval of this replication rule.\n :param asynchronous: Create rule asynchronously by judge-injector.\n :param priority: Priority of the transfers.\n :param comment: Comment about the rule.\n :param meta: Metadata, as dictionary.\n \"\"\"\n path = self.RULE_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n # TODO remove the subscription_id from the client; It will only be used by the core;\n data = dumps({'dids': dids, 'copies': copies, 'rse_expression': rse_expression,\n 'weight': weight, 'lifetime': lifetime, 'grouping': grouping,\n 'account': account, 'locked': locked, 'source_replica_expression': source_replica_expression,\n 'activity': activity, 'notify': notify, 'purge_replicas': purge_replicas,\n 'ignore_availability': ignore_availability, 'comment': comment, 'ask_approval': ask_approval,\n 'asynchronous': asynchronous, 'priority': priority, 'meta': meta})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.created:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def delete_replication_rule(self, rule_id, purge_replicas=None):\n \"\"\"\n Deletes a replication rule and all associated locks.\n\n :param rule_id: The id of the rule to be deleted\n :param purge_replicas: Immediately delete the replicas.\n :raises: RuleNotFound, AccessDenied\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n\n data = dumps({'purge_replicas': purge_replicas})\n\n r = self._send_request(url, type='DEL', data=data)\n\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def get_replication_rule(self, rule_id, estimate_ttc=False):\n \"\"\"\n Get a replication rule.\n\n :param rule_id: The id of the rule to be retrieved.\n :param estimate_ttc: bool, if rule_info should return ttc information\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'estimate_ttc': estimate_ttc})\n r = self._send_request(url, type='GET', data=data)\n if r.status_code == codes.ok:\n return next(self._load_json_data(r))\n else:\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def update_replication_rule(self, rule_id, options):\n \"\"\"\n :param rule_id: The id of the rule to be retrieved.\n :param options: Options dictionary.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': options})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def reduce_replication_rule(self, rule_id, copies, exclude_expression=None):\n \"\"\"\n :param rule_id: Rule to be reduced.\n :param copies: Number of copies of the new rule.\n :param exclude_expression: RSE Expression of RSEs to exclude.\n :raises: RuleReplaceFailed, RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id + '/reduce'\n url = 
build_url(choice(self.list_hosts), path=path)\n data = dumps({'copies': copies, 'exclude_expression': exclude_expression})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.ok:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def move_replication_rule(self, rule_id, rse_expression):\n \"\"\"\n Move a replication rule to another RSE and, once done, delete the original one.\n\n :param rule_id: Rule to be moved.\n :param rse_expression: RSE expression of the new rule.\n :raises: RuleNotFound, RuleReplaceFailed\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id + '/move'\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'rule_id': rule_id, 'rse_expression': rse_expression})\n r = self._send_request(url, type='POST', data=data)\n if r.status_code == codes.created:\n return loads(r.text)\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def approve_replication_rule(self, rule_id):\n \"\"\"\n :param rule_id: Rule to be approved.\n :raises: RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': {'approve': True}})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def deny_replication_rule(self, rule_id):\n \"\"\"\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n\n path = self.RULE_BASEURL + '/' + rule_id\n url = build_url(choice(self.list_hosts), path=path)\n data = dumps({'options': {'approve': False}})\n r = self._send_request(url, type='PUT', data=data)\n if r.status_code == codes.ok:\n return True\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)\n\n def list_replication_rule_full_history(self, scope, name):\n \"\"\"\n List the rule history of a DID.\n\n :param scope: The scope of the DID.\n :param name: The name of the DID.\n \"\"\"\n path = '/'.join([self.RULE_BASEURL, quote_plus(scope), quote_plus(name), 'history'])\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return self._load_json_data(r)\n exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n\n def examine_replication_rule(self, rule_id):\n \"\"\"\n Examine a replication rule for errors during transfer.\n\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id + '/analysis'\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return next(self._load_json_data(r))\n exc_cls, exc_msg = self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n\n def list_replica_locks(self, rule_id):\n \"\"\"\n List details of all replica locks for a rule.\n\n :param rule_id: Rule to be denied.\n :raises: RuleNotFound\n \"\"\"\n path = self.RULE_BASEURL + '/' + rule_id + '/locks'\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type='GET')\n if r.status_code == codes.ok:\n return self._load_json_data(r)\n exc_cls, exc_msg = 
self._get_exception(r.headers, r.status_code)\n raise exc_cls(exc_msg)\n", "path": "lib/rucio/client/ruleclient.py"}]}
| 3,624 | 228 |
gh_patches_debug_56080
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3107
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: typing miss of kwargs
### 🐛 Describe the bug
When I tried to train prompts with opt model, the following error was occurred.
```
Traceback (most recent call last):
File "/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py", line 127, in <module>
main(args)
File "/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py", line 42, in main
critic = OPTCritic(lora_rank=args.lora_rank).cuda()
File "/home/xxx/.pyenv/versions/3.9.9/lib/python3.9/site-packages/chatgpt/models/opt/opt_critic.py", line 38, in __init__
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
NameError: name 'kwargs' is not defined
```
To Reproduce
```
torchrun --standalone --nproc_per_node=2 train_prompts.py prompts.csv --model opt --strategy colossalai_zero2
```
### Environment
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/chatgpt/models/opt/opt_critic.py`
Content:
```
1 from typing import Optional
2
3 import torch.nn as nn
4 from transformers.models.opt.configuration_opt import OPTConfig
5 from transformers.models.opt.modeling_opt import OPTModel
6
7 from ..base import Critic
8
9
10 class OPTCritic(Critic):
11 """
12 OPT Critic model.
13
14 Args:
15 pretrained (str): Pretrained model name or path.
16 config (OPTConfig): Model config.
17 checkpoint (bool): Enable gradient checkpointing.
18 lora_rank (int): Rank of the low-rank approximation.
19 lora_train_bias (str): LoRA bias training mode.
20 """
21
22 def __init__(self,
23 pretrained: Optional[str] = None,
24 config: Optional[OPTConfig] = None,
25 checkpoint: bool = False,
26 lora_rank: int = 0,
27 lora_train_bias: str = 'none',
28 **kargs) -> None:
29 if pretrained is not None:
30 model = OPTModel.from_pretrained(pretrained)
31 elif config is not None:
32 model = OPTModel(config)
33 else:
34 model = OPTModel(OPTConfig())
35 if checkpoint:
36 model.gradient_checkpointing_enable()
37 value_head = nn.Linear(model.config.hidden_size, 1)
38 super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
@@ -25,7 +25,7 @@
checkpoint: bool = False,
lora_rank: int = 0,
lora_train_bias: str = 'none',
- **kargs) -> None:
+ **kwargs) -> None:
if pretrained is not None:
model = OPTModel.from_pretrained(pretrained)
elif config is not None:
|
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n@@ -25,7 +25,7 @@\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n- **kargs) -> None:\n+ **kwargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n", "issue": "[BUG]: typing miss of kwargs\n### \ud83d\udc1b Describe the bug\n\nWhen I tried to train prompts with opt model, the following error was occurred.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py\", line 127, in <module>\r\n main(args)\r\n File \"/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py\", line 42, in main\r\n critic = OPTCritic(lora_rank=args.lora_rank).cuda()\r\n File \"/home/xxx/.pyenv/versions/3.9.9/lib/python3.9/site-packages/chatgpt/models/opt/opt_critic.py\", line 38, in __init__\r\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\r\nNameError: name 'kwargs' is not defined\r\n```\r\n\r\nTo Reproduce\r\n\r\n```\r\ntorchrun --standalone --nproc_per_node=2 train_prompts.py prompts.csv --model opt --strategy colossalai_zero2\r\n```\r\n\r\n\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.opt.configuration_opt import OPTConfig\nfrom transformers.models.opt.modeling_opt import OPTModel\n\nfrom ..base import Critic\n\n\nclass OPTCritic(Critic):\n \"\"\"\n OPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (OPTConfig): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n lora_rank (int): Rank of the low-rank approximation.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[OPTConfig] = None,\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n **kargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n model = OPTModel(config)\n else:\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.hidden_size, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/opt/opt_critic.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.opt.configuration_opt import OPTConfig\nfrom transformers.models.opt.modeling_opt import OPTModel\n\nfrom ..base import Critic\n\n\nclass OPTCritic(Critic):\n \"\"\"\n OPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (OPTConfig): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n lora_rank (int): Rank of the low-rank approximation.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[OPTConfig] = None,\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n **kwargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n model = OPTModel(config)\n else:\n 
model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.hidden_size, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/opt/opt_critic.py"}]}
| 864 | 154 |
gh_patches_debug_29082
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-1477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mark-for-op and tag actions missing for resource=kinesis
The mark-for-op and tag actions are available for ec2, rds, and other resource types, but are not available for kinesis; it would be desirable to have them there as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/dev/ratchet.py`
Content:
```
1 #!/usr/bin/env python3.6
2 # -*- coding: utf-8 -*-
3 """Ratchet up successes under Python 3.6.
4 """
5 from __future__ import (
6 absolute_import, division, print_function, unicode_literals)
7
8 import sys
9 import xml.etree.ElementTree as Etree
10
11
12 INVALID_TYPE = 'E botocore.exceptions.ParamValidationError: ' \
13 'Parameter validation failed: Invalid type for parameter '
14
15
16 class TestResults(object):
17
18 def __init__(self, results_path):
19 self.results_path = results_path
20 self.failed = []
21 self.failed_aggregates = {}
22 self.stderr_output = []
23 self.passed = []
24 self._tree = None
25
26 def parse(self):
27 if self._tree:
28 raise AssertionError("Already Parsed")
29 self._tree = Etree.parse(self.results_path)
30 for testcase in self._tree.findall('testcase'):
31 self.process_testcase(testcase)
32 return self
33
34 def process_testcase(self, case):
35 key = self.case_key(case)
36
37 # look at children but throw away stderr output
38 nonsuccess = [c for c in case if not c.tag == 'system-err']
39 n = len(nonsuccess)
40 if n > 1:
41 raise AssertionError("multiple results for %s: %s" %
42 (key, nonsuccess))
43 elif n == 1:
44 result = nonsuccess.pop()
45 self.failed.append(key)
46 message = result.get('message')
47 if message.startswith(INVALID_TYPE):
48 message = INVALID_TYPE
49 self.failed_aggregates.setdefault(message, []).append(key)
50 else:
51 self.passed.append(key)
52
53 @staticmethod
54 def case_key(case):
55 return "%s.%s" % (case.get('classname'), case.get('name'))
56
57 def report(self, details=False):
58 for k, v in sorted(
59 self.failed_aggregates.items(),
60 key = lambda i: len(i[1]),
61 reverse=True):
62 print("# %s" % k)
63 for t in v:
64 print(" - %s" % t)
65
66
67 def load_expected_failures(txt):
68 expected_failures = open(txt).read()
69 parsed = set()
70 for line in expected_failures.splitlines():
71 if not line or line.startswith('#'):
72 continue
73 parsed.add(line)
74 return parsed
75
76
77 def list_tests(tests):
78 for test in sorted(tests):
79 print(' ', test)
80
81
82 def update_expectation(txt_path, tests):
83 new = set(t + '\n' for t in tests)
84 open(txt_path, 'w+').writelines(sorted(new))
85
86
87 def main(xml_path, txt_path):
88 """Takes two paths, one to XML output from pytest, the other to a text file
89 listing expected successes. Walks the former looking for the latter.
90 """
91 results = TestResults(xml_path).parse()
92
93 if txt_path == '-':
94 results.report()
95 return
96
97 previous = load_expected_failures(txt_path)
98 current = set(results.failed)
99
100 expected = previous - current
101 if expected:
102 print("Some tests expected to fail under Python 3.6 didn't:")
103 list_tests(expected)
104 update_expectation(txt_path, current)
105 print("Conveniently, they have been removed from {} for you. Perhaps "
106 "commit that?".format(txt_path))
107
108 unexpected = current - previous
109 if unexpected:
110 print("Some tests not expected to fail under Python 3.6 did:")
111 list_tests(unexpected)
112
113 if expected or unexpected:
114 print("Previously %d tests failed under Python 3.6, now %d did." %
115 (len(previous), len(current)))
116 return 1
117
118 print('All and only tests expected to fail under Python 3.6 did.')
119 return 0
120
121
122 if __name__ == '__main__':
123 try:
124 xml_path, txt_path = sys.argv[1:3]
125 except ValueError:
126 script = sys.argv[0]
127 print('usage: {} <junitxml filepath> <expected successes filepath>'
128 .format(script), file=sys.stderr)
129 result = 1
130 else:
131 result = main(xml_path, txt_path)
132 sys.exit(result)
133
```
Path: `c7n/resources/glacier.py`
Content:
```
1 # Copyright 2016 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from botocore.exceptions import ClientError
17
18 from c7n.filters import CrossAccountAccessFilter
19 from c7n.query import QueryResourceManager
20 from c7n.manager import resources
21 from c7n.utils import get_retry, local_session
22
23
24 @resources.register('glacier')
25 class Glacier(QueryResourceManager):
26
27 permissions = ('glacier:ListTagsForVault',)
28 retry = staticmethod(get_retry(('Throttled',)))
29
30 class resource_type(object):
31 service = 'glacier'
32 enum_spec = ('list_vaults', 'VaultList', None)
33 name = "VaultName"
34 id = "VaultARN"
35 dimension = None
36 universal_taggable = True
37
38 def augment(self, resources):
39 def process_tags(resource):
40 client = local_session(self.session_factory).client('glacier')
41 tag_dict = self.retry(
42 client.list_tags_for_vault,
43 vaultName=resource[self.get_model().name])['Tags']
44 tag_list = []
45 for k, v in tag_dict.items():
46 tag_list.append({'Key': k, 'Value': v})
47 resource['Tags'] = tag_list
48 return resource
49
50 with self.executor_factory(max_workers=2) as w:
51 return list(w.map(process_tags, resources))
52
53
54 @Glacier.filter_registry.register('cross-account')
55 class GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):
56 """Filter to return all glacier vaults with cross account access permissions
57
58 The whitelist parameter will omit the accounts that match from the return
59
60 :example:
61
62 .. code-block:
63
64 policies:
65 - name: glacier-cross-account
66 resource: glacier
67 filters:
68 - type: cross-account
69 whitelist:
70 - permitted-account-01
71 - permitted-account-02
72 """
73 permissions = ('glacier:GetVaultAccessPolicy',)
74
75 def process(self, resources, event=None):
76 def _augment(r):
77 client = local_session(
78 self.manager.session_factory).client('glacier')
79 try:
80 r['Policy'] = client.get_vault_access_policy(
81 vaultName=r['VaultName'])['policy']['Policy']
82 return r
83 except ClientError as e:
84 if e.response['Error']['Code'] == 'AccessDeniedException':
85 self.log.warning(
86 "Access denied getting policy glacier:%s",
87 r['FunctionName'])
88
89 self.log.debug("fetching policy for %d glacier" % len(resources))
90 with self.executor_factory(max_workers=3) as w:
91 resources = list(filter(None, w.map(_augment, resources)))
92
93 return super(GlacierCrossAccountAccessFilter, self).process(
94 resources, event)
95
```
Path: `c7n/resources/kinesis.py`
Content:
```
1 # Copyright 2016 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.actions import Action
17 from c7n.manager import resources
18 from c7n.query import QueryResourceManager
19 from c7n.utils import local_session, type_schema
20
21
22 @resources.register('kinesis')
23 class KinesisStream(QueryResourceManager):
24
25 class resource_type(object):
26 service = 'kinesis'
27 type = 'stream'
28 enum_spec = ('list_streams', 'StreamNames', None)
29 detail_spec = (
30 'describe_stream', 'StreamName', None, 'StreamDescription')
31 name = id = 'StreamName'
32 filter_name = None
33 filter_type = None
34 date = None
35 dimension = 'StreamName'
36
37
38 @KinesisStream.action_registry.register('delete')
39 class Delete(Action):
40
41 schema = type_schema('delete')
42 permissions = ("kinesis:DeleteStream",)
43
44 def process(self, resources):
45 client = local_session(self.manager.session_factory).client('kinesis')
46 not_active = [r['StreamName'] for r in resources
47 if r['StreamStatus'] != 'ACTIVE']
48 self.log.warning(
49 "The following streams cannot be deleted (wrong state): %s" % (
50 ", ".join(not_active)))
51 for r in resources:
52 if not r['StreamStatus'] == 'ACTIVE':
53 continue
54 client.delete_stream(
55 StreamName=r['StreamName'])
56
57
58 @resources.register('firehose')
59 class DeliveryStream(QueryResourceManager):
60
61 class resource_type(object):
62 service = 'firehose'
63 type = 'deliverystream'
64 enum_spec = ('list_delivery_streams', 'DeliveryStreamNames', None)
65 detail_spec = (
66 'describe_delivery_stream', 'DeliveryStreamName', None,
67 'DeliveryStreamDescription')
68 name = id = 'DeliveryStreamName'
69 filter_name = None
70 filter_type = None
71 date = 'CreateTimestamp'
72 dimension = 'DeliveryStreamName'
73
74
75 @DeliveryStream.action_registry.register('delete')
76 class FirehoseDelete(Action):
77
78 schema = type_schema('delete')
79 permissions = ("firehose:DeleteDeliveryStream",)
80
81 def process(self, resources):
82 client = local_session(self.manager.session_factory).client('firehose')
83 creating = [r['DeliveryStreamName'] for r in resources
84 if r['DeliveryStreamStatus'] == 'CREATING']
85 if creating:
86 self.log.warning(
87 "These delivery streams can't be deleted (wrong state): %s" % (
88 ", ".join(creating)))
89 for r in resources:
90 if not r['DeliveryStreamStatus'] == 'ACTIVE':
91 continue
92 client.delete_delivery_stream(
93 DeliveryStreamName=r['DeliveryStreamName'])
94
95
96 @resources.register('kinesis-analytics')
97 class AnalyticsApp(QueryResourceManager):
98
99 class resource_type(object):
100 service = "kinesisanalytics"
101 enum_spec = ('list_applications', 'ApplicationSummaries', None)
102 detail_spec = ('describe_application', 'ApplicationName',
103 'ApplicationName', 'ApplicationDetail')
104 name = "ApplicationName"
105 id = "ApplicationARN"
106 dimension = None
107 filter_name = None
108 filter_type = None
109
110
111 @AnalyticsApp.action_registry.register('delete')
112 class AppDelete(Action):
113
114 schema = type_schema('delete')
115 permissions = ("kinesisanalytics:DeleteApplication",)
116
117 def process(self, resources):
118 client = local_session(
119 self.manager.session_factory).client('kinesisanalytics')
120 for r in resources:
121 client.delete_application(
122 ApplicationName=r['ApplicationName'],
123 CreateTimestamp=r['CreateTimestamp'])
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/resources/glacier.py b/c7n/resources/glacier.py
--- a/c7n/resources/glacier.py
+++ b/c7n/resources/glacier.py
@@ -32,6 +32,7 @@
enum_spec = ('list_vaults', 'VaultList', None)
name = "VaultName"
id = "VaultARN"
+ filter_name = None
dimension = None
universal_taggable = True
diff --git a/c7n/resources/kinesis.py b/c7n/resources/kinesis.py
--- a/c7n/resources/kinesis.py
+++ b/c7n/resources/kinesis.py
@@ -16,6 +16,7 @@
from c7n.actions import Action
from c7n.manager import resources
from c7n.query import QueryResourceManager
+from c7n.tags import universal_augment
from c7n.utils import local_session, type_schema
@@ -33,6 +34,11 @@
filter_type = None
date = None
dimension = 'StreamName'
+ universal_taggable = True
+
+ def augment(self, resources):
+ return universal_augment(
+ self, super(KinesisStream, self).augment(resources))
@KinesisStream.action_registry.register('delete')
diff --git a/tools/dev/ratchet.py b/tools/dev/ratchet.py
--- a/tools/dev/ratchet.py
+++ b/tools/dev/ratchet.py
@@ -44,6 +44,8 @@
result = nonsuccess.pop()
self.failed.append(key)
message = result.get('message')
+ if message is None:
+ return
if message.startswith(INVALID_TYPE):
message = INVALID_TYPE
self.failed_aggregates.setdefault(message, []).append(key)
|
{"golden_diff": "diff --git a/c7n/resources/glacier.py b/c7n/resources/glacier.py\n--- a/c7n/resources/glacier.py\n+++ b/c7n/resources/glacier.py\n@@ -32,6 +32,7 @@\n enum_spec = ('list_vaults', 'VaultList', None)\n name = \"VaultName\"\n id = \"VaultARN\"\n+ filter_name = None\n dimension = None\n universal_taggable = True\n \ndiff --git a/c7n/resources/kinesis.py b/c7n/resources/kinesis.py\n--- a/c7n/resources/kinesis.py\n+++ b/c7n/resources/kinesis.py\n@@ -16,6 +16,7 @@\n from c7n.actions import Action\n from c7n.manager import resources\n from c7n.query import QueryResourceManager\n+from c7n.tags import universal_augment\n from c7n.utils import local_session, type_schema\n \n \n@@ -33,6 +34,11 @@\n filter_type = None\n date = None\n dimension = 'StreamName'\n+ universal_taggable = True\n+\n+ def augment(self, resources):\n+ return universal_augment(\n+ self, super(KinesisStream, self).augment(resources))\n \n \n @KinesisStream.action_registry.register('delete')\ndiff --git a/tools/dev/ratchet.py b/tools/dev/ratchet.py\n--- a/tools/dev/ratchet.py\n+++ b/tools/dev/ratchet.py\n@@ -44,6 +44,8 @@\n result = nonsuccess.pop()\n self.failed.append(key)\n message = result.get('message')\n+ if message is None:\n+ return\n if message.startswith(INVALID_TYPE):\n message = INVALID_TYPE\n self.failed_aggregates.setdefault(message, []).append(key)\n", "issue": "mark-for-op and tag actions missing for resource=kinesis\nThe mark-for-op and tag actions are available for ec2, rds, and other resource types is not available for kinesis, but would desire them to be\n", "before_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"Ratchet up successes under Python 3.6.\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport sys\nimport xml.etree.ElementTree as Etree\n\n\nINVALID_TYPE = 'E botocore.exceptions.ParamValidationError: ' \\\n 'Parameter validation failed: Invalid type for parameter '\n\n\nclass TestResults(object):\n\n def __init__(self, results_path):\n self.results_path = results_path\n self.failed = []\n self.failed_aggregates = {}\n self.stderr_output = []\n self.passed = []\n self._tree = None\n\n def parse(self):\n if self._tree:\n raise AssertionError(\"Already Parsed\")\n self._tree = Etree.parse(self.results_path)\n for testcase in self._tree.findall('testcase'):\n self.process_testcase(testcase)\n return self\n\n def process_testcase(self, case):\n key = self.case_key(case)\n\n # look at children but throw away stderr output\n nonsuccess = [c for c in case if not c.tag == 'system-err']\n n = len(nonsuccess)\n if n > 1:\n raise AssertionError(\"multiple results for %s: %s\" %\n (key, nonsuccess))\n elif n == 1:\n result = nonsuccess.pop()\n self.failed.append(key)\n message = result.get('message')\n if message.startswith(INVALID_TYPE):\n message = INVALID_TYPE\n self.failed_aggregates.setdefault(message, []).append(key)\n else:\n self.passed.append(key)\n\n @staticmethod\n def case_key(case):\n return \"%s.%s\" % (case.get('classname'), case.get('name'))\n\n def report(self, details=False):\n for k, v in sorted(\n self.failed_aggregates.items(),\n key = lambda i: len(i[1]),\n reverse=True):\n print(\"# %s\" % k)\n for t in v:\n print(\" - %s\" % t)\n\n\ndef load_expected_failures(txt):\n expected_failures = open(txt).read()\n parsed = set()\n for line in expected_failures.splitlines():\n if not line or line.startswith('#'):\n continue\n parsed.add(line)\n return parsed\n\n\ndef list_tests(tests):\n for test in 
sorted(tests):\n print(' ', test)\n\n\ndef update_expectation(txt_path, tests):\n new = set(t + '\\n' for t in tests)\n open(txt_path, 'w+').writelines(sorted(new))\n\n\ndef main(xml_path, txt_path):\n \"\"\"Takes two paths, one to XML output from pytest, the other to a text file\n listing expected successes. Walks the former looking for the latter.\n \"\"\"\n results = TestResults(xml_path).parse()\n\n if txt_path == '-':\n results.report()\n return\n\n previous = load_expected_failures(txt_path)\n current = set(results.failed)\n\n expected = previous - current\n if expected:\n print(\"Some tests expected to fail under Python 3.6 didn't:\")\n list_tests(expected)\n update_expectation(txt_path, current)\n print(\"Conveniently, they have been removed from {} for you. Perhaps \"\n \"commit that?\".format(txt_path))\n\n unexpected = current - previous\n if unexpected:\n print(\"Some tests not expected to fail under Python 3.6 did:\")\n list_tests(unexpected)\n\n if expected or unexpected:\n print(\"Previously %d tests failed under Python 3.6, now %d did.\" %\n (len(previous), len(current)))\n return 1\n\n print('All and only tests expected to fail under Python 3.6 did.')\n return 0\n\n\nif __name__ == '__main__':\n try:\n xml_path, txt_path = sys.argv[1:3]\n except ValueError:\n script = sys.argv[0]\n print('usage: {} <junitxml filepath> <expected successes filepath>'\n .format(script), file=sys.stderr)\n result = 1\n else:\n result = main(xml_path, txt_path)\n sys.exit(result)\n", "path": "tools/dev/ratchet.py"}, {"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom botocore.exceptions import ClientError\n\nfrom c7n.filters import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager\nfrom c7n.manager import resources\nfrom c7n.utils import get_retry, local_session\n\n\[email protected]('glacier')\nclass Glacier(QueryResourceManager):\n\n permissions = ('glacier:ListTagsForVault',)\n retry = staticmethod(get_retry(('Throttled',)))\n\n class resource_type(object):\n service = 'glacier'\n enum_spec = ('list_vaults', 'VaultList', None)\n name = \"VaultName\"\n id = \"VaultARN\"\n dimension = None\n universal_taggable = True\n\n def augment(self, resources):\n def process_tags(resource):\n client = local_session(self.session_factory).client('glacier')\n tag_dict = self.retry(\n client.list_tags_for_vault,\n vaultName=resource[self.get_model().name])['Tags']\n tag_list = []\n for k, v in tag_dict.items():\n tag_list.append({'Key': k, 'Value': v})\n resource['Tags'] = tag_list\n return resource\n\n with self.executor_factory(max_workers=2) as w:\n return list(w.map(process_tags, resources))\n\n\[email protected]_registry.register('cross-account')\nclass GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):\n \"\"\"Filter to return all glacier vaults with cross account access permissions\n\n The whitelist parameter will omit the accounts 
that match from the return\n\n :example:\n\n .. code-block:\n\n policies:\n - name: glacier-cross-account\n resource: glacier\n filters:\n - type: cross-account\n whitelist:\n - permitted-account-01\n - permitted-account-02\n \"\"\"\n permissions = ('glacier:GetVaultAccessPolicy',)\n\n def process(self, resources, event=None):\n def _augment(r):\n client = local_session(\n self.manager.session_factory).client('glacier')\n try:\n r['Policy'] = client.get_vault_access_policy(\n vaultName=r['VaultName'])['policy']['Policy']\n return r\n except ClientError as e:\n if e.response['Error']['Code'] == 'AccessDeniedException':\n self.log.warning(\n \"Access denied getting policy glacier:%s\",\n r['FunctionName'])\n\n self.log.debug(\"fetching policy for %d glacier\" % len(resources))\n with self.executor_factory(max_workers=3) as w:\n resources = list(filter(None, w.map(_augment, resources)))\n\n return super(GlacierCrossAccountAccessFilter, self).process(\n resources, event)\n", "path": "c7n/resources/glacier.py"}, {"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\n\n\[email protected]('kinesis')\nclass KinesisStream(QueryResourceManager):\n\n class resource_type(object):\n service = 'kinesis'\n type = 'stream'\n enum_spec = ('list_streams', 'StreamNames', None)\n detail_spec = (\n 'describe_stream', 'StreamName', None, 'StreamDescription')\n name = id = 'StreamName'\n filter_name = None\n filter_type = None\n date = None\n dimension = 'StreamName'\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = (\"kinesis:DeleteStream\",)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('kinesis')\n not_active = [r['StreamName'] for r in resources\n if r['StreamStatus'] != 'ACTIVE']\n self.log.warning(\n \"The following streams cannot be deleted (wrong state): %s\" % (\n \", \".join(not_active)))\n for r in resources:\n if not r['StreamStatus'] == 'ACTIVE':\n continue\n client.delete_stream(\n StreamName=r['StreamName'])\n\n\[email protected]('firehose')\nclass DeliveryStream(QueryResourceManager):\n\n class resource_type(object):\n service = 'firehose'\n type = 'deliverystream'\n enum_spec = ('list_delivery_streams', 'DeliveryStreamNames', None)\n detail_spec = (\n 'describe_delivery_stream', 'DeliveryStreamName', None,\n 'DeliveryStreamDescription')\n name = id = 'DeliveryStreamName'\n filter_name = None\n filter_type = None\n date = 'CreateTimestamp'\n dimension = 'DeliveryStreamName'\n\n\[email protected]_registry.register('delete')\nclass FirehoseDelete(Action):\n\n schema = type_schema('delete')\n permissions = (\"firehose:DeleteDeliveryStream\",)\n\n def process(self, resources):\n client = 
local_session(self.manager.session_factory).client('firehose')\n creating = [r['DeliveryStreamName'] for r in resources\n if r['DeliveryStreamStatus'] == 'CREATING']\n if creating:\n self.log.warning(\n \"These delivery streams can't be deleted (wrong state): %s\" % (\n \", \".join(creating)))\n for r in resources:\n if not r['DeliveryStreamStatus'] == 'ACTIVE':\n continue\n client.delete_delivery_stream(\n DeliveryStreamName=r['DeliveryStreamName'])\n\n\[email protected]('kinesis-analytics')\nclass AnalyticsApp(QueryResourceManager):\n\n class resource_type(object):\n service = \"kinesisanalytics\"\n enum_spec = ('list_applications', 'ApplicationSummaries', None)\n detail_spec = ('describe_application', 'ApplicationName',\n 'ApplicationName', 'ApplicationDetail')\n name = \"ApplicationName\"\n id = \"ApplicationARN\"\n dimension = None\n filter_name = None\n filter_type = None\n\n\[email protected]_registry.register('delete')\nclass AppDelete(Action):\n\n schema = type_schema('delete')\n permissions = (\"kinesisanalytics:DeleteApplication\",)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('kinesisanalytics')\n for r in resources:\n client.delete_application(\n ApplicationName=r['ApplicationName'],\n CreateTimestamp=r['CreateTimestamp'])\n", "path": "c7n/resources/kinesis.py"}], "after_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"Ratchet up successes under Python 3.6.\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport sys\nimport xml.etree.ElementTree as Etree\n\n\nINVALID_TYPE = 'E botocore.exceptions.ParamValidationError: ' \\\n 'Parameter validation failed: Invalid type for parameter '\n\n\nclass TestResults(object):\n\n def __init__(self, results_path):\n self.results_path = results_path\n self.failed = []\n self.failed_aggregates = {}\n self.stderr_output = []\n self.passed = []\n self._tree = None\n\n def parse(self):\n if self._tree:\n raise AssertionError(\"Already Parsed\")\n self._tree = Etree.parse(self.results_path)\n for testcase in self._tree.findall('testcase'):\n self.process_testcase(testcase)\n return self\n\n def process_testcase(self, case):\n key = self.case_key(case)\n\n # look at children but throw away stderr output\n nonsuccess = [c for c in case if not c.tag == 'system-err']\n n = len(nonsuccess)\n if n > 1:\n raise AssertionError(\"multiple results for %s: %s\" %\n (key, nonsuccess))\n elif n == 1:\n result = nonsuccess.pop()\n self.failed.append(key)\n message = result.get('message')\n if message is None:\n return\n if message.startswith(INVALID_TYPE):\n message = INVALID_TYPE\n self.failed_aggregates.setdefault(message, []).append(key)\n else:\n self.passed.append(key)\n\n @staticmethod\n def case_key(case):\n return \"%s.%s\" % (case.get('classname'), case.get('name'))\n\n def report(self, details=False):\n for k, v in sorted(\n self.failed_aggregates.items(),\n key = lambda i: len(i[1]),\n reverse=True):\n print(\"# %s\" % k)\n for t in v:\n print(\" - %s\" % t)\n\n\ndef load_expected_failures(txt):\n expected_failures = open(txt).read()\n parsed = set()\n for line in expected_failures.splitlines():\n if not line or line.startswith('#'):\n continue\n parsed.add(line)\n return parsed\n\n\ndef list_tests(tests):\n for test in sorted(tests):\n print(' ', test)\n\n\ndef update_expectation(txt_path, tests):\n new = set(t + '\\n' for t in tests)\n open(txt_path, 'w+').writelines(sorted(new))\n\n\ndef main(xml_path, 
txt_path):\n \"\"\"Takes two paths, one to XML output from pytest, the other to a text file\n listing expected successes. Walks the former looking for the latter.\n \"\"\"\n results = TestResults(xml_path).parse()\n\n if txt_path == '-':\n results.report()\n return\n\n previous = load_expected_failures(txt_path)\n current = set(results.failed)\n\n expected = previous - current\n if expected:\n print(\"Some tests expected to fail under Python 3.6 didn't:\")\n list_tests(expected)\n update_expectation(txt_path, current)\n print(\"Conveniently, they have been removed from {} for you. Perhaps \"\n \"commit that?\".format(txt_path))\n\n unexpected = current - previous\n if unexpected:\n print(\"Some tests not expected to fail under Python 3.6 did:\")\n list_tests(unexpected)\n\n if expected or unexpected:\n print(\"Previously %d tests failed under Python 3.6, now %d did.\" %\n (len(previous), len(current)))\n return 1\n\n print('All and only tests expected to fail under Python 3.6 did.')\n return 0\n\n\nif __name__ == '__main__':\n try:\n xml_path, txt_path = sys.argv[1:3]\n except ValueError:\n script = sys.argv[0]\n print('usage: {} <junitxml filepath> <expected successes filepath>'\n .format(script), file=sys.stderr)\n result = 1\n else:\n result = main(xml_path, txt_path)\n sys.exit(result)\n", "path": "tools/dev/ratchet.py"}, {"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom botocore.exceptions import ClientError\n\nfrom c7n.filters import CrossAccountAccessFilter\nfrom c7n.query import QueryResourceManager\nfrom c7n.manager import resources\nfrom c7n.utils import get_retry, local_session\n\n\[email protected]('glacier')\nclass Glacier(QueryResourceManager):\n\n permissions = ('glacier:ListTagsForVault',)\n retry = staticmethod(get_retry(('Throttled',)))\n\n class resource_type(object):\n service = 'glacier'\n enum_spec = ('list_vaults', 'VaultList', None)\n name = \"VaultName\"\n id = \"VaultARN\"\n filter_name = None\n dimension = None\n universal_taggable = True\n\n def augment(self, resources):\n def process_tags(resource):\n client = local_session(self.session_factory).client('glacier')\n tag_dict = self.retry(\n client.list_tags_for_vault,\n vaultName=resource[self.get_model().name])['Tags']\n tag_list = []\n for k, v in tag_dict.items():\n tag_list.append({'Key': k, 'Value': v})\n resource['Tags'] = tag_list\n return resource\n\n with self.executor_factory(max_workers=2) as w:\n return list(w.map(process_tags, resources))\n\n\[email protected]_registry.register('cross-account')\nclass GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):\n \"\"\"Filter to return all glacier vaults with cross account access permissions\n\n The whitelist parameter will omit the accounts that match from the return\n\n :example:\n\n .. 
code-block:\n\n policies:\n - name: glacier-cross-account\n resource: glacier\n filters:\n - type: cross-account\n whitelist:\n - permitted-account-01\n - permitted-account-02\n \"\"\"\n permissions = ('glacier:GetVaultAccessPolicy',)\n\n def process(self, resources, event=None):\n def _augment(r):\n client = local_session(\n self.manager.session_factory).client('glacier')\n try:\n r['Policy'] = client.get_vault_access_policy(\n vaultName=r['VaultName'])['policy']['Policy']\n return r\n except ClientError as e:\n if e.response['Error']['Code'] == 'AccessDeniedException':\n self.log.warning(\n \"Access denied getting policy glacier:%s\",\n r['FunctionName'])\n\n self.log.debug(\"fetching policy for %d glacier\" % len(resources))\n with self.executor_factory(max_workers=3) as w:\n resources = list(filter(None, w.map(_augment, resources)))\n\n return super(GlacierCrossAccountAccessFilter, self).process(\n resources, event)\n", "path": "c7n/resources/glacier.py"}, {"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\n\[email protected]('kinesis')\nclass KinesisStream(QueryResourceManager):\n\n class resource_type(object):\n service = 'kinesis'\n type = 'stream'\n enum_spec = ('list_streams', 'StreamNames', None)\n detail_spec = (\n 'describe_stream', 'StreamName', None, 'StreamDescription')\n name = id = 'StreamName'\n filter_name = None\n filter_type = None\n date = None\n dimension = 'StreamName'\n universal_taggable = True\n\n def augment(self, resources):\n return universal_augment(\n self, super(KinesisStream, self).augment(resources))\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = (\"kinesis:DeleteStream\",)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('kinesis')\n not_active = [r['StreamName'] for r in resources\n if r['StreamStatus'] != 'ACTIVE']\n self.log.warning(\n \"The following streams cannot be deleted (wrong state): %s\" % (\n \", \".join(not_active)))\n for r in resources:\n if not r['StreamStatus'] == 'ACTIVE':\n continue\n client.delete_stream(\n StreamName=r['StreamName'])\n\n\[email protected]('firehose')\nclass DeliveryStream(QueryResourceManager):\n\n class resource_type(object):\n service = 'firehose'\n type = 'deliverystream'\n enum_spec = ('list_delivery_streams', 'DeliveryStreamNames', None)\n detail_spec = (\n 'describe_delivery_stream', 'DeliveryStreamName', None,\n 'DeliveryStreamDescription')\n name = id = 'DeliveryStreamName'\n filter_name = None\n filter_type = None\n date = 'CreateTimestamp'\n dimension = 'DeliveryStreamName'\n\n\[email protected]_registry.register('delete')\nclass 
FirehoseDelete(Action):\n\n schema = type_schema('delete')\n permissions = (\"firehose:DeleteDeliveryStream\",)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('firehose')\n creating = [r['DeliveryStreamName'] for r in resources\n if r['DeliveryStreamStatus'] == 'CREATING']\n if creating:\n self.log.warning(\n \"These delivery streams can't be deleted (wrong state): %s\" % (\n \", \".join(creating)))\n for r in resources:\n if not r['DeliveryStreamStatus'] == 'ACTIVE':\n continue\n client.delete_delivery_stream(\n DeliveryStreamName=r['DeliveryStreamName'])\n\n\[email protected]('kinesis-analytics')\nclass AnalyticsApp(QueryResourceManager):\n\n class resource_type(object):\n service = \"kinesisanalytics\"\n enum_spec = ('list_applications', 'ApplicationSummaries', None)\n detail_spec = ('describe_application', 'ApplicationName',\n 'ApplicationName', 'ApplicationDetail')\n name = \"ApplicationName\"\n id = \"ApplicationARN\"\n dimension = None\n filter_name = None\n filter_type = None\n\n\[email protected]_registry.register('delete')\nclass AppDelete(Action):\n\n schema = type_schema('delete')\n permissions = (\"kinesisanalytics:DeleteApplication\",)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('kinesisanalytics')\n for r in resources:\n client.delete_application(\n ApplicationName=r['ApplicationName'],\n CreateTimestamp=r['CreateTimestamp'])\n", "path": "c7n/resources/kinesis.py"}]}
| 3,625 | 386 |
gh_patches_debug_8724
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-2414
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nn.Embedding improperly sorts inputs when max_norm is specified
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
ids = Variable(torch.LongTensor([2, 12, 4, 8, 8, 6]))
embed = nn.Embedding(22, 5, max_norm=1.0)
print(ids) # before: 2 12 4 8 8 6
out = embed(ids)
print(ids) # after: 2 4 6 8 8 12
```
The output is also incorrect. It's the output from the sorted indices, instead of the user specified indices.
Reported by @adampolyak
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/nn/_functions/thnn/sparse.py`
Content:
```
1 import torch
2 from torch.autograd.function import Function
3 from torch._thnn import type2backend
4 from torch.autograd.function import once_differentiable
5
6 from . import _all_functions
7
8
9 class Embedding(Function):
10
11 @staticmethod
12 def _renorm(ctx, indices, weight, max_norm, norm_type):
13 if indices.dim() == 2:
14 indices = indices.clone().view(-1)
15
16 ctx._backend.LookupTable_renorm(
17 ctx._backend.library_state,
18 indices,
19 weight,
20 max_norm,
21 norm_type
22 )
23
24 @classmethod
25 def forward(cls, ctx, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,
26 sparse=False):
27
28 ctx.padding_idx = padding_idx
29 ctx.scale_grad_by_freq = scale_grad_by_freq
30 ctx._indices = None
31 ctx.sparse = sparse
32
33 assert indices.dim() <= 2
34 assert not ctx.needs_input_grad[0], "Embedding doesn't " \
35 "compute the gradient w.r.t. the indices"
36
37 ctx._backend = type2backend[type(weight)]
38 ctx._weight_size = weight.size()
39
40 if not indices.is_contiguous():
41 ctx._indices = indices.contiguous()
42 indices = ctx._indices
43 else:
44 ctx.save_for_backward(indices)
45
46 output = weight.new()
47 if max_norm is not None:
48 cls._renorm(ctx, indices, weight, max_norm, norm_type)
49
50 if indices.dim() == 1:
51 output = torch.index_select(weight, 0, indices)
52 else:
53 output = torch.index_select(weight, 0, indices.view(-1))
54 output = output.view(indices.size(0), indices.size(1), weight.size(1))
55
56 return output
57
58 @staticmethod
59 @once_differentiable
60 def backward(ctx, grad_output):
61 if ctx._indices is not None:
62 indices = ctx._indices
63 else:
64 indices, = ctx.saved_tensors
65
66 grad_output = grad_output.contiguous()
67 if not ctx.sparse:
68 if indices.dim() == 2:
69 indices = indices.view(-1)
70
71 with torch.cuda.device_of(grad_output):
72 if grad_output.is_cuda:
73 _sorted = torch.cuda.LongTensor()
74 _indices = torch.cuda.LongTensor()
75 _count = torch.cuda.LongTensor()
76 else:
77 _count = torch.IntTensor()
78 _sorted = _indices = None
79
80 grad_weight = grad_output.new(ctx._weight_size).zero_()
81 # Doesn't support Variable grad_output
82 ctx._backend.LookupTable_accGradParameters(
83 ctx._backend.library_state,
84 indices,
85 grad_output,
86 grad_weight,
87 _count,
88 _sorted,
89 _indices,
90 ctx.scale_grad_by_freq,
91 ctx.padding_idx,
92 1
93 )
94 else:
95 tensor_type = type(grad_output).__name__
96 if grad_output.is_cuda:
97 SparseTensor = getattr(torch.cuda.sparse, tensor_type)
98 else:
99 SparseTensor = getattr(torch.sparse, tensor_type)
100 grad_weight = SparseTensor(
101 indices.view(1, -1),
102 grad_output.view(-1, ctx._weight_size[1]),
103 ctx._weight_size,
104 )
105 return None, grad_weight, None, None, None, None, None
106
107
108 _all_functions.append(Embedding)
109
110 MODE_SUM = 0
111 MODE_MEAN = 1
112
113
114 class EmbeddingBag(Function):
115
116 def __init__(self, max_norm, norm_type, scale_grad_by_freq, mode):
117 super(EmbeddingBag, self).__init__()
118 self.max_norm = max_norm
119 self.norm_type = norm_type
120 self.scale_grad_by_freq = scale_grad_by_freq
121 self._indices = None
122 assert mode is not None
123 if mode == 'sum':
124 self.mode = MODE_SUM
125 elif mode == 'mean':
126 self.mode = MODE_MEAN
127 else:
128 raise ValueError("mode needs to be 'sum' or 'mean', but got {}"
129 .format(mode))
130
131 def _renorm(self, indices, weight):
132 self._backend.LookupTable_renorm(
133 self._backend.library_state,
134 indices,
135 weight,
136 self.max_norm,
137 self.norm_type
138 )
139
140 def forward(self, weight, indices, offsets):
141 assert not self.needs_input_grad[1], "EmbeddingBag doesn't " \
142 "compute the gradient w.r.t. the indices"
143
144 assert not self.needs_input_grad[2], "EmbeddingBag doesn't " \
145 "compute the gradient w.r.t. the offsets"
146
147 assert indices.dim() == 1
148 if offsets.dim() != 1:
149 raise ValueError("offsets has to be a 1D Tensor")
150
151 if offsets[0] != 0:
152 raise ValueError("offsets[0] has to be 0, i.e. the first sequence"
153 " in the mini-batch has to start from position 0."
154 "However, got {}".format(offsets[0]))
155 if offsets[-1] > indices.size(0):
156 raise ValueError("offsets[-1] has to be smaller than indices's length"
157 " ({}), but got offsets[-1] of {}"
158 .format(indices.size(0), offsets[-1]))
159
160 self._backend = type2backend[type(weight)]
161 self._weight_size = weight.size()
162 self._offset2bag = offsets.new()
163
164 self.save_for_backward(indices)
165
166 indices = indices.contiguous().view(-1)
167 output = weight.new()
168 if self.max_norm is not None:
169 self._renorm(indices, weight)
170
171 if weight.is_cuda:
172 if self.mode == MODE_MEAN:
173 self.bag_size = offsets.new().resize_(offsets.size())
174 else:
175 self.bag_size = None
176
177 self._backend.LookupTableBag_updateOutput(
178 self._backend.library_state,
179 indices,
180 offsets,
181 weight,
182 output,
183 self._offset2bag,
184 self.mode,
185 self.bag_size
186 )
187 else:
188 # slow CPU implementation
189 index_output = torch.index_select(weight, 0, indices)
190 # indices = [1, 2, 30, 100, 12], offsets = [0, 2, 3]
191 self._offset2bag.resize_(indices.size(0)).zero_() # offset2bag = [0 0 0 0 0]
192 self._offset2bag.index_fill_(0, offsets, 1) # offset2bag = [1 0 1 0 1]
193 self._offset2bag[0] = 0 # offset2bag = [0 0 1 0 1]
194 self._offset2bag = self._offset2bag.cumsum(0) # offset2bag = [0 0 1 1 2]
195 output.resize_(offsets.size(0), weight.size(1)).zero_()
196 output.index_add_(0, self._offset2bag, index_output)
197 if self.mode == MODE_MEAN:
198 if offsets.size(0) == 1:
199 self.bag_size = indices.size(0)
200 else:
201 self.bag_size = weight.new().resize_(offsets.size())
202 self.bag_size[:-1] = offsets[1:] - offsets[:-1]
203 self.bag_size[-1] = indices.size(0) - offsets[-1]
204 self.bag_size = self.bag_size[:, None].expand_as(output)
205 output /= self.bag_size
206
207 return output
208
209 def backward(self, grad_output):
210 indices, = self.saved_tensors
211 indices = indices.contiguous().view(-1)
212 grad_output = grad_output.contiguous()
213
214 with torch.cuda.device_of(grad_output):
215 if grad_output.is_cuda:
216 _sorted = torch.cuda.LongTensor()
217 _indices = torch.cuda.LongTensor()
218 _count = torch.cuda.LongTensor()
219 else:
220 _count = torch.IntTensor()
221 _sorted = _indices = None
222
223 grad_weight = grad_output.new(self._weight_size).zero_()
224
225 if grad_output.is_cuda:
226 self._backend.LookupTableBag_accGradParameters(
227 self._backend.library_state,
228 indices,
229 grad_output,
230 grad_weight,
231 self._offset2bag,
232 _count,
233 _sorted,
234 _indices,
235 self.scale_grad_by_freq,
236 self.mode,
237 self.bag_size,
238 1
239 )
240 else:
241 # slow CPU implementation
242 if self.mode == MODE_MEAN:
243 # divide by average count
244 grad_output = grad_output / self.bag_size
245
246 index_grad_output = grad_output.index_select(0, self._offset2bag)
247 self._backend.LookupTable_accGradParameters(
248 self._backend.library_state,
249 indices,
250 index_grad_output,
251 grad_weight,
252 _count,
253 _sorted,
254 _indices,
255 self.scale_grad_by_freq,
256 -1,
257 1
258 )
259
260 return grad_weight, None, None
261
262
263 _all_functions.append(EmbeddingBag)
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/nn/_functions/thnn/sparse.py b/torch/nn/_functions/thnn/sparse.py
--- a/torch/nn/_functions/thnn/sparse.py
+++ b/torch/nn/_functions/thnn/sparse.py
@@ -10,12 +10,10 @@
@staticmethod
def _renorm(ctx, indices, weight, max_norm, norm_type):
- if indices.dim() == 2:
- indices = indices.clone().view(-1)
-
+ # clone indices since LookupTable_renorm modifies it in-place
ctx._backend.LookupTable_renorm(
ctx._backend.library_state,
- indices,
+ indices.clone().view(-1),
weight,
max_norm,
norm_type
|
{"golden_diff": "diff --git a/torch/nn/_functions/thnn/sparse.py b/torch/nn/_functions/thnn/sparse.py\n--- a/torch/nn/_functions/thnn/sparse.py\n+++ b/torch/nn/_functions/thnn/sparse.py\n@@ -10,12 +10,10 @@\n \n @staticmethod\n def _renorm(ctx, indices, weight, max_norm, norm_type):\n- if indices.dim() == 2:\n- indices = indices.clone().view(-1)\n-\n+ # clone indices since LookupTable_renorm modifies it in-place\n ctx._backend.LookupTable_renorm(\n ctx._backend.library_state,\n- indices,\n+ indices.clone().view(-1),\n weight,\n max_norm,\n norm_type\n", "issue": "nn.Embedding improperly sorts inputs when max_norm is specified\n```python\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\n\r\nids = Variable(torch.LongTensor([2, 12, 4, 8, 8, 6]))\r\nembed = nn.Embedding(22, 5, max_norm=1.0)\r\nprint(ids) # before: 2 12 4 8 8 6\r\nout = embed(ids)\r\nprint(ids) # after: 2 4 6 8 8 12\r\n```\r\n\r\nThe output is also incorrect. It's the output from the sorted indices, instead of the user specified indices.\r\n\r\nReported by @adampolyak \n", "before_files": [{"content": "import torch\nfrom torch.autograd.function import Function\nfrom torch._thnn import type2backend\nfrom torch.autograd.function import once_differentiable\n\nfrom . import _all_functions\n\n\nclass Embedding(Function):\n\n @staticmethod\n def _renorm(ctx, indices, weight, max_norm, norm_type):\n if indices.dim() == 2:\n indices = indices.clone().view(-1)\n\n ctx._backend.LookupTable_renorm(\n ctx._backend.library_state,\n indices,\n weight,\n max_norm,\n norm_type\n )\n\n @classmethod\n def forward(cls, ctx, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,\n sparse=False):\n\n ctx.padding_idx = padding_idx\n ctx.scale_grad_by_freq = scale_grad_by_freq\n ctx._indices = None\n ctx.sparse = sparse\n\n assert indices.dim() <= 2\n assert not ctx.needs_input_grad[0], \"Embedding doesn't \" \\\n \"compute the gradient w.r.t. 
the indices\"\n\n ctx._backend = type2backend[type(weight)]\n ctx._weight_size = weight.size()\n\n if not indices.is_contiguous():\n ctx._indices = indices.contiguous()\n indices = ctx._indices\n else:\n ctx.save_for_backward(indices)\n\n output = weight.new()\n if max_norm is not None:\n cls._renorm(ctx, indices, weight, max_norm, norm_type)\n\n if indices.dim() == 1:\n output = torch.index_select(weight, 0, indices)\n else:\n output = torch.index_select(weight, 0, indices.view(-1))\n output = output.view(indices.size(0), indices.size(1), weight.size(1))\n\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n if ctx._indices is not None:\n indices = ctx._indices\n else:\n indices, = ctx.saved_tensors\n\n grad_output = grad_output.contiguous()\n if not ctx.sparse:\n if indices.dim() == 2:\n indices = indices.view(-1)\n\n with torch.cuda.device_of(grad_output):\n if grad_output.is_cuda:\n _sorted = torch.cuda.LongTensor()\n _indices = torch.cuda.LongTensor()\n _count = torch.cuda.LongTensor()\n else:\n _count = torch.IntTensor()\n _sorted = _indices = None\n\n grad_weight = grad_output.new(ctx._weight_size).zero_()\n # Doesn't support Variable grad_output\n ctx._backend.LookupTable_accGradParameters(\n ctx._backend.library_state,\n indices,\n grad_output,\n grad_weight,\n _count,\n _sorted,\n _indices,\n ctx.scale_grad_by_freq,\n ctx.padding_idx,\n 1\n )\n else:\n tensor_type = type(grad_output).__name__\n if grad_output.is_cuda:\n SparseTensor = getattr(torch.cuda.sparse, tensor_type)\n else:\n SparseTensor = getattr(torch.sparse, tensor_type)\n grad_weight = SparseTensor(\n indices.view(1, -1),\n grad_output.view(-1, ctx._weight_size[1]),\n ctx._weight_size,\n )\n return None, grad_weight, None, None, None, None, None\n\n\n_all_functions.append(Embedding)\n\nMODE_SUM = 0\nMODE_MEAN = 1\n\n\nclass EmbeddingBag(Function):\n\n def __init__(self, max_norm, norm_type, scale_grad_by_freq, mode):\n super(EmbeddingBag, self).__init__()\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self._indices = None\n assert mode is not None\n if mode == 'sum':\n self.mode = MODE_SUM\n elif mode == 'mean':\n self.mode = MODE_MEAN\n else:\n raise ValueError(\"mode needs to be 'sum' or 'mean', but got {}\"\n .format(mode))\n\n def _renorm(self, indices, weight):\n self._backend.LookupTable_renorm(\n self._backend.library_state,\n indices,\n weight,\n self.max_norm,\n self.norm_type\n )\n\n def forward(self, weight, indices, offsets):\n assert not self.needs_input_grad[1], \"EmbeddingBag doesn't \" \\\n \"compute the gradient w.r.t. the indices\"\n\n assert not self.needs_input_grad[2], \"EmbeddingBag doesn't \" \\\n \"compute the gradient w.r.t. the offsets\"\n\n assert indices.dim() == 1\n if offsets.dim() != 1:\n raise ValueError(\"offsets has to be a 1D Tensor\")\n\n if offsets[0] != 0:\n raise ValueError(\"offsets[0] has to be 0, i.e. 
the first sequence\"\n \" in the mini-batch has to start from position 0.\"\n \"However, got {}\".format(offsets[0]))\n if offsets[-1] > indices.size(0):\n raise ValueError(\"offsets[-1] has to be smaller than indices's length\"\n \" ({}), but got offsets[-1] of {}\"\n .format(indices.size(0), offsets[-1]))\n\n self._backend = type2backend[type(weight)]\n self._weight_size = weight.size()\n self._offset2bag = offsets.new()\n\n self.save_for_backward(indices)\n\n indices = indices.contiguous().view(-1)\n output = weight.new()\n if self.max_norm is not None:\n self._renorm(indices, weight)\n\n if weight.is_cuda:\n if self.mode == MODE_MEAN:\n self.bag_size = offsets.new().resize_(offsets.size())\n else:\n self.bag_size = None\n\n self._backend.LookupTableBag_updateOutput(\n self._backend.library_state,\n indices,\n offsets,\n weight,\n output,\n self._offset2bag,\n self.mode,\n self.bag_size\n )\n else:\n # slow CPU implementation\n index_output = torch.index_select(weight, 0, indices)\n # indices = [1, 2, 30, 100, 12], offsets = [0, 2, 3]\n self._offset2bag.resize_(indices.size(0)).zero_() # offset2bag = [0 0 0 0 0]\n self._offset2bag.index_fill_(0, offsets, 1) # offset2bag = [1 0 1 0 1]\n self._offset2bag[0] = 0 # offset2bag = [0 0 1 0 1]\n self._offset2bag = self._offset2bag.cumsum(0) # offset2bag = [0 0 1 1 2]\n output.resize_(offsets.size(0), weight.size(1)).zero_()\n output.index_add_(0, self._offset2bag, index_output)\n if self.mode == MODE_MEAN:\n if offsets.size(0) == 1:\n self.bag_size = indices.size(0)\n else:\n self.bag_size = weight.new().resize_(offsets.size())\n self.bag_size[:-1] = offsets[1:] - offsets[:-1]\n self.bag_size[-1] = indices.size(0) - offsets[-1]\n self.bag_size = self.bag_size[:, None].expand_as(output)\n output /= self.bag_size\n\n return output\n\n def backward(self, grad_output):\n indices, = self.saved_tensors\n indices = indices.contiguous().view(-1)\n grad_output = grad_output.contiguous()\n\n with torch.cuda.device_of(grad_output):\n if grad_output.is_cuda:\n _sorted = torch.cuda.LongTensor()\n _indices = torch.cuda.LongTensor()\n _count = torch.cuda.LongTensor()\n else:\n _count = torch.IntTensor()\n _sorted = _indices = None\n\n grad_weight = grad_output.new(self._weight_size).zero_()\n\n if grad_output.is_cuda:\n self._backend.LookupTableBag_accGradParameters(\n self._backend.library_state,\n indices,\n grad_output,\n grad_weight,\n self._offset2bag,\n _count,\n _sorted,\n _indices,\n self.scale_grad_by_freq,\n self.mode,\n self.bag_size,\n 1\n )\n else:\n # slow CPU implementation\n if self.mode == MODE_MEAN:\n # divide by average count\n grad_output = grad_output / self.bag_size\n\n index_grad_output = grad_output.index_select(0, self._offset2bag)\n self._backend.LookupTable_accGradParameters(\n self._backend.library_state,\n indices,\n index_grad_output,\n grad_weight,\n _count,\n _sorted,\n _indices,\n self.scale_grad_by_freq,\n -1,\n 1\n )\n\n return grad_weight, None, None\n\n\n_all_functions.append(EmbeddingBag)\n", "path": "torch/nn/_functions/thnn/sparse.py"}], "after_files": [{"content": "import torch\nfrom torch.autograd.function import Function\nfrom torch._thnn import type2backend\nfrom torch.autograd.function import once_differentiable\n\nfrom . 
import _all_functions\n\n\nclass Embedding(Function):\n\n @staticmethod\n def _renorm(ctx, indices, weight, max_norm, norm_type):\n # clone indices since LookupTable_renorm modifies it in-place\n ctx._backend.LookupTable_renorm(\n ctx._backend.library_state,\n indices.clone().view(-1),\n weight,\n max_norm,\n norm_type\n )\n\n @classmethod\n def forward(cls, ctx, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,\n sparse=False):\n\n ctx.padding_idx = padding_idx\n ctx.scale_grad_by_freq = scale_grad_by_freq\n ctx._indices = None\n ctx.sparse = sparse\n\n assert indices.dim() <= 2\n assert not ctx.needs_input_grad[0], \"Embedding doesn't \" \\\n \"compute the gradient w.r.t. the indices\"\n\n ctx._backend = type2backend[type(weight)]\n ctx._weight_size = weight.size()\n\n if not indices.is_contiguous():\n ctx._indices = indices.contiguous()\n indices = ctx._indices\n else:\n ctx.save_for_backward(indices)\n\n output = weight.new()\n if max_norm is not None:\n cls._renorm(ctx, indices, weight, max_norm, norm_type)\n\n if indices.dim() == 1:\n output = torch.index_select(weight, 0, indices)\n else:\n output = torch.index_select(weight, 0, indices.view(-1))\n output = output.view(indices.size(0), indices.size(1), weight.size(1))\n\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n if ctx._indices is not None:\n indices = ctx._indices\n else:\n indices, = ctx.saved_tensors\n\n grad_output = grad_output.contiguous()\n if not ctx.sparse:\n if indices.dim() == 2:\n indices = indices.view(-1)\n\n with torch.cuda.device_of(grad_output):\n if grad_output.is_cuda:\n _sorted = torch.cuda.LongTensor()\n _indices = torch.cuda.LongTensor()\n _count = torch.cuda.LongTensor()\n else:\n _count = torch.IntTensor()\n _sorted = _indices = None\n\n grad_weight = grad_output.new(ctx._weight_size).zero_()\n # Doesn't support Variable grad_output\n ctx._backend.LookupTable_accGradParameters(\n ctx._backend.library_state,\n indices,\n grad_output,\n grad_weight,\n _count,\n _sorted,\n _indices,\n ctx.scale_grad_by_freq,\n ctx.padding_idx,\n 1\n )\n else:\n tensor_type = type(grad_output).__name__\n if grad_output.is_cuda:\n SparseTensor = getattr(torch.cuda.sparse, tensor_type)\n else:\n SparseTensor = getattr(torch.sparse, tensor_type)\n grad_weight = SparseTensor(\n indices.view(1, -1),\n grad_output.view(-1, ctx._weight_size[1]),\n ctx._weight_size,\n )\n return None, grad_weight, None, None, None, None, None\n\n\n_all_functions.append(Embedding)\n\nMODE_SUM = 0\nMODE_MEAN = 1\n\n\nclass EmbeddingBag(Function):\n\n def __init__(self, max_norm, norm_type, scale_grad_by_freq, mode):\n super(EmbeddingBag, self).__init__()\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self._indices = None\n assert mode is not None\n if mode == 'sum':\n self.mode = MODE_SUM\n elif mode == 'mean':\n self.mode = MODE_MEAN\n else:\n raise ValueError(\"mode needs to be 'sum' or 'mean', but got {}\"\n .format(mode))\n\n def _renorm(self, indices, weight):\n self._backend.LookupTable_renorm(\n self._backend.library_state,\n indices,\n weight,\n self.max_norm,\n self.norm_type\n )\n\n def forward(self, weight, indices, offsets):\n assert not self.needs_input_grad[1], \"EmbeddingBag doesn't \" \\\n \"compute the gradient w.r.t. the indices\"\n\n assert not self.needs_input_grad[2], \"EmbeddingBag doesn't \" \\\n \"compute the gradient w.r.t. 
the offsets\"\n\n assert indices.dim() == 1\n if offsets.dim() != 1:\n raise ValueError(\"offsets has to be a 1D Tensor\")\n\n if offsets[0] != 0:\n raise ValueError(\"offsets[0] has to be 0, i.e. the first sequence\"\n \" in the mini-batch has to start from position 0.\"\n \"However, got {}\".format(offsets[0]))\n if offsets[-1] > indices.size(0):\n raise ValueError(\"offsets[-1] has to be smaller than indices's length\"\n \" ({}), but got offsets[-1] of {}\"\n .format(indices.size(0), offsets[-1]))\n\n self._backend = type2backend[type(weight)]\n self._weight_size = weight.size()\n self._offset2bag = offsets.new()\n\n self.save_for_backward(indices)\n\n indices = indices.contiguous().view(-1)\n output = weight.new()\n if self.max_norm is not None:\n self._renorm(indices, weight)\n\n if weight.is_cuda:\n if self.mode == MODE_MEAN:\n self.bag_size = offsets.new().resize_(offsets.size())\n else:\n self.bag_size = None\n\n self._backend.LookupTableBag_updateOutput(\n self._backend.library_state,\n indices,\n offsets,\n weight,\n output,\n self._offset2bag,\n self.mode,\n self.bag_size\n )\n else:\n # slow CPU implementation\n index_output = torch.index_select(weight, 0, indices)\n # indices = [1, 2, 30, 100, 12], offsets = [0, 2, 3]\n self._offset2bag.resize_(indices.size(0)).zero_() # offset2bag = [0 0 0 0 0]\n self._offset2bag.index_fill_(0, offsets, 1) # offset2bag = [1 0 1 0 1]\n self._offset2bag[0] = 0 # offset2bag = [0 0 1 0 1]\n self._offset2bag = self._offset2bag.cumsum(0) # offset2bag = [0 0 1 1 2]\n output.resize_(offsets.size(0), weight.size(1)).zero_()\n output.index_add_(0, self._offset2bag, index_output)\n if self.mode == MODE_MEAN:\n if offsets.size(0) == 1:\n self.bag_size = indices.size(0)\n else:\n self.bag_size = weight.new().resize_(offsets.size())\n self.bag_size[:-1] = offsets[1:] - offsets[:-1]\n self.bag_size[-1] = indices.size(0) - offsets[-1]\n self.bag_size = self.bag_size[:, None].expand_as(output)\n output /= self.bag_size\n\n return output\n\n def backward(self, grad_output):\n indices, = self.saved_tensors\n indices = indices.contiguous().view(-1)\n grad_output = grad_output.contiguous()\n\n with torch.cuda.device_of(grad_output):\n if grad_output.is_cuda:\n _sorted = torch.cuda.LongTensor()\n _indices = torch.cuda.LongTensor()\n _count = torch.cuda.LongTensor()\n else:\n _count = torch.IntTensor()\n _sorted = _indices = None\n\n grad_weight = grad_output.new(self._weight_size).zero_()\n\n if grad_output.is_cuda:\n self._backend.LookupTableBag_accGradParameters(\n self._backend.library_state,\n indices,\n grad_output,\n grad_weight,\n self._offset2bag,\n _count,\n _sorted,\n _indices,\n self.scale_grad_by_freq,\n self.mode,\n self.bag_size,\n 1\n )\n else:\n # slow CPU implementation\n if self.mode == MODE_MEAN:\n # divide by average count\n grad_output = grad_output / self.bag_size\n\n index_grad_output = grad_output.index_select(0, self._offset2bag)\n self._backend.LookupTable_accGradParameters(\n self._backend.library_state,\n indices,\n index_grad_output,\n grad_weight,\n _count,\n _sorted,\n _indices,\n self.scale_grad_by_freq,\n -1,\n 1\n )\n\n return grad_weight, None, None\n\n\n_all_functions.append(EmbeddingBag)\n", "path": "torch/nn/_functions/thnn/sparse.py"}]}
| 3,076 | 169 |
gh_patches_debug_15721
|
rasdani/github-patches
|
git_diff
|
gpodder__mygpo-57
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feedback link on left hand side is broken
There are a few issues with the Feedback "tab" on the left hand side:
* The styling is not rendering properly in FF 54 (Linux). I see no background for to the text that comes up, instead it dims the page and I see the text overlayed over the page text.
* The text indicates I am being redirected, but the redirect does not seem to execute.
* The redirect link goes to [getsatisfaction.com](http://retired.getsatisfaction.com/) which is not active. It should probably go to this issue tracker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mygpo/users/settings.py`
Content:
```
1 from collections import namedtuple
2
3
4 WellKnownSetting = namedtuple('WellKnownSetting', 'name default')
5
6 ## Well-known settings
7 # this should be documented at
8 # http://wiki.gpodder.org/wiki/Web_Services/API_2/Settings#Known_Settings
9
10 # Flag to allow storing of user-agents
11 STORE_UA = WellKnownSetting('store_user_agent', True)
12
13 # Flag to mark a subscription as public
14 PUBLIC_SUB_PODCAST = WellKnownSetting('public_subscription', True)
15
16 # Default public-flag value (stored in the podcast)
17 PUBLIC_SUB_USER = WellKnownSetting('public_subscriptions', True)
18
19 # Flattr authentication token, empty if not logged in
20 FLATTR_TOKEN = WellKnownSetting('flattr_token', '')
21
22 # enable auto-flattring
23 FLATTR_AUTO = WellKnownSetting('auto_flattr', False)
24
25 # auto-flattr mygpo
26 FLATTR_MYGPO = WellKnownSetting('flattr_mygpo', False)
27
28 # username for flattr buttons for own content
29 FLATTR_USERNAME = WellKnownSetting('flattr_username', '')
30
31 # Flag to mark an episode as favorite
32 FAV_FLAG = WellKnownSetting('is_favorite', False)
33
```
Path: `mygpo/api/advanced/updates.py`
Content:
```
1 from itertools import chain
2 from datetime import datetime
3
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound
5 from django.contrib.sites.requests import RequestSite
6 from django.views.decorators.csrf import csrf_exempt
7 from django.views.decorators.cache import never_cache
8 from django.utils.decorators import method_decorator
9 from django.views import View
10
11 from mygpo.podcasts.models import Episode
12 from mygpo.api.httpresponse import JsonResponse
13 from mygpo.api.advanced import episode_action_json
14 from mygpo.api.advanced.directory import episode_data, podcast_data
15 from mygpo.utils import parse_bool, get_timestamp
16 from mygpo.subscriptions import get_subscription_history, subscription_diff
17 from mygpo.users.models import Client
18 from mygpo.episodestates.models import EpisodeState
19 from mygpo.users.subscriptions import subscription_changes, podcasts_for_states
20 from mygpo.api.basic_auth import require_valid_user, check_username
21 from mygpo.decorators import cors_origin
22
23 from collections import namedtuple
24 EpisodeStatus = namedtuple('EpisodeStatus', 'episode status action')
25
26 import logging
27 logger = logging.getLogger(__name__)
28
29
30 class DeviceUpdates(View):
31 """ returns various updates for a device
32
33 http://wiki.gpodder.org/wiki/Web_Services/API_2/Devices#Get_Updates """
34
35 @method_decorator(csrf_exempt)
36 @method_decorator(require_valid_user)
37 @method_decorator(check_username)
38 @method_decorator(never_cache)
39 @method_decorator(cors_origin())
40 def get(self, request, username, device_uid):
41
42 now = datetime.utcnow()
43 now_ = get_timestamp(now)
44
45 user = request.user
46
47 try:
48 device = user.client_set.get(uid=device_uid)
49 except Client.DoesNotExist as e:
50 return HttpResponseNotFound(str(e))
51
52 try:
53 since = self.get_since(request)
54 except ValueError as e:
55 return HttpResponseBadRequest(str(e))
56
57 include_actions = parse_bool(request.GET.get('include_actions', False))
58
59 domain = RequestSite(request).domain
60
61 add, rem, subscriptions = self.get_subscription_changes(user, device,
62 since, now,
63 domain)
64 updates = self.get_episode_changes(user, subscriptions, domain,
65 include_actions, since)
66
67 return JsonResponse({
68 'add': add,
69 'rem': rem,
70 'updates': updates,
71 'timestamp': get_timestamp(now),
72 })
73
74
75 def get_subscription_changes(self, user, device, since, now, domain):
76 """ gets new, removed and current subscriptions """
77
78 history = get_subscription_history(user, device, since, now)
79 add, rem = subscription_diff(history)
80
81 subscriptions = device.get_subscribed_podcasts()
82
83 add = [podcast_data(p, domain) for p in add]
84 rem = [p.url for p in rem]
85
86 return add, rem, subscriptions
87
88
89 def get_episode_changes(self, user, subscriptions, domain, include_actions, since):
90 devices = {dev.id.hex: dev.uid for dev in user.client_set.all()}
91
92 # index subscribed podcasts by their Id for fast access
93 podcasts = {p.get_id(): p for p in subscriptions}
94
95 episode_updates = self.get_episode_updates(user, subscriptions, since)
96
97 return [self.get_episode_data(status, podcasts, domain,
98 include_actions, user, devices) for status in episode_updates]
99
100
101 def get_episode_updates(self, user, subscribed_podcasts, since,
102 max_per_podcast=5):
103 """ Returns the episode updates since the timestamp """
104
105 episodes = []
106 for podcast in subscribed_podcasts:
107 eps = Episode.objects.filter(podcast=podcast,
108 released__gt=since)\
109 .order_by('-order', '-released')
110 episodes.extend(eps[:max_per_podcast])
111
112 states = EpisodeState.dict_for_user(user, episodes)
113
114 for episode in episodes:
115 yield EpisodeStatus(episode, states.get(episode.id, 'new'), None)
116
117
118 def get_episode_data(self, episode_status, podcasts, domain, include_actions, user, devices):
119 """ Get episode data for an episode status object """
120
121 # TODO: shouldn't the podcast_id be in the episode status?
122 podcast_id = episode_status.episode.podcast
123 podcast = podcasts.get(podcast_id, None)
124 t = episode_data(episode_status.episode, domain, podcast)
125 t['status'] = episode_status.status
126
127 # include latest action (bug 1419)
128 # TODO
129 if include_actions and episode_status.action:
130 t['action'] = episode_action_json(episode_status.action, user)
131
132 return t
133
134 def get_since(self, request):
135 """ parses the "since" parameter """
136 since_ = request.GET.get('since', None)
137 if since_ is None:
138 raise ValueError('parameter since missing')
139 try:
140 return datetime.fromtimestamp(float(since_))
141 except ValueError as e:
142 raise ValueError("'since' is not a valid timestamp: %s" % str(e))
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mygpo/api/advanced/updates.py b/mygpo/api/advanced/updates.py
--- a/mygpo/api/advanced/updates.py
+++ b/mygpo/api/advanced/updates.py
@@ -30,7 +30,7 @@
class DeviceUpdates(View):
""" returns various updates for a device
- http://wiki.gpodder.org/wiki/Web_Services/API_2/Devices#Get_Updates """
+ https://gpoddernet.readthedocs.io/en/latest/api//Devices#Get_Updates """
@method_decorator(csrf_exempt)
@method_decorator(require_valid_user)
diff --git a/mygpo/users/settings.py b/mygpo/users/settings.py
--- a/mygpo/users/settings.py
+++ b/mygpo/users/settings.py
@@ -5,7 +5,7 @@
## Well-known settings
# this should be documented at
-# http://wiki.gpodder.org/wiki/Web_Services/API_2/Settings#Known_Settings
+# https://gpoddernet.readthedocs.io/en/latest/api//Settings#Known_Settings
# Flag to allow storing of user-agents
STORE_UA = WellKnownSetting('store_user_agent', True)
|
{"golden_diff": "diff --git a/mygpo/api/advanced/updates.py b/mygpo/api/advanced/updates.py\n--- a/mygpo/api/advanced/updates.py\n+++ b/mygpo/api/advanced/updates.py\n@@ -30,7 +30,7 @@\n class DeviceUpdates(View):\n \"\"\" returns various updates for a device\n \n- http://wiki.gpodder.org/wiki/Web_Services/API_2/Devices#Get_Updates \"\"\"\n+ https://gpoddernet.readthedocs.io/en/latest/api//Devices#Get_Updates \"\"\"\n \n @method_decorator(csrf_exempt)\n @method_decorator(require_valid_user)\ndiff --git a/mygpo/users/settings.py b/mygpo/users/settings.py\n--- a/mygpo/users/settings.py\n+++ b/mygpo/users/settings.py\n@@ -5,7 +5,7 @@\n \n ## Well-known settings\n # this should be documented at\n-# http://wiki.gpodder.org/wiki/Web_Services/API_2/Settings#Known_Settings\n+# https://gpoddernet.readthedocs.io/en/latest/api//Settings#Known_Settings\n \n # Flag to allow storing of user-agents\n STORE_UA = WellKnownSetting('store_user_agent', True)\n", "issue": "Feedback link on left hand side is broken\nThere are a few issues with the Feedback \"tab\" on the left hand side:\r\n * The styling is not rendering properly in FF 54 (Linux). I see no background for to the text that comes up, instead it dims the page and I see the text overlayed over the page text.\r\n * The text indicates I am being redirected, but the redirect does not seem to execute.\r\n * The redirect link goes to [getsatisfaction.com](http://retired.getsatisfaction.com/) which is not active. It should probably go to this issue tracker.\n", "before_files": [{"content": "from collections import namedtuple\n\n\nWellKnownSetting = namedtuple('WellKnownSetting', 'name default')\n\n## Well-known settings\n# this should be documented at\n# http://wiki.gpodder.org/wiki/Web_Services/API_2/Settings#Known_Settings\n\n# Flag to allow storing of user-agents\nSTORE_UA = WellKnownSetting('store_user_agent', True)\n\n# Flag to mark a subscription as public\nPUBLIC_SUB_PODCAST = WellKnownSetting('public_subscription', True)\n\n# Default public-flag value (stored in the podcast)\nPUBLIC_SUB_USER = WellKnownSetting('public_subscriptions', True)\n\n# Flattr authentication token, empty if not logged in\nFLATTR_TOKEN = WellKnownSetting('flattr_token', '')\n\n# enable auto-flattring\nFLATTR_AUTO = WellKnownSetting('auto_flattr', False)\n\n# auto-flattr mygpo\nFLATTR_MYGPO = WellKnownSetting('flattr_mygpo', False)\n\n# username for flattr buttons for own content\nFLATTR_USERNAME = WellKnownSetting('flattr_username', '')\n\n# Flag to mark an episode as favorite\nFAV_FLAG = WellKnownSetting('is_favorite', False)\n", "path": "mygpo/users/settings.py"}, {"content": "from itertools import chain\nfrom datetime import datetime\n\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound\nfrom django.contrib.sites.requests import RequestSite\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom mygpo.podcasts.models import Episode\nfrom mygpo.api.httpresponse import JsonResponse\nfrom mygpo.api.advanced import episode_action_json\nfrom mygpo.api.advanced.directory import episode_data, podcast_data\nfrom mygpo.utils import parse_bool, get_timestamp\nfrom mygpo.subscriptions import get_subscription_history, subscription_diff\nfrom mygpo.users.models import Client\nfrom mygpo.episodestates.models import EpisodeState\nfrom mygpo.users.subscriptions import subscription_changes, podcasts_for_states\nfrom 
mygpo.api.basic_auth import require_valid_user, check_username\nfrom mygpo.decorators import cors_origin\n\nfrom collections import namedtuple\nEpisodeStatus = namedtuple('EpisodeStatus', 'episode status action')\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass DeviceUpdates(View):\n \"\"\" returns various updates for a device\n\n http://wiki.gpodder.org/wiki/Web_Services/API_2/Devices#Get_Updates \"\"\"\n\n @method_decorator(csrf_exempt)\n @method_decorator(require_valid_user)\n @method_decorator(check_username)\n @method_decorator(never_cache)\n @method_decorator(cors_origin())\n def get(self, request, username, device_uid):\n\n now = datetime.utcnow()\n now_ = get_timestamp(now)\n\n user = request.user\n\n try:\n device = user.client_set.get(uid=device_uid)\n except Client.DoesNotExist as e:\n return HttpResponseNotFound(str(e))\n\n try:\n since = self.get_since(request)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n include_actions = parse_bool(request.GET.get('include_actions', False))\n\n domain = RequestSite(request).domain\n\n add, rem, subscriptions = self.get_subscription_changes(user, device,\n since, now,\n domain)\n updates = self.get_episode_changes(user, subscriptions, domain,\n include_actions, since)\n\n return JsonResponse({\n 'add': add,\n 'rem': rem,\n 'updates': updates,\n 'timestamp': get_timestamp(now),\n })\n\n\n def get_subscription_changes(self, user, device, since, now, domain):\n \"\"\" gets new, removed and current subscriptions \"\"\"\n\n history = get_subscription_history(user, device, since, now)\n add, rem = subscription_diff(history)\n\n subscriptions = device.get_subscribed_podcasts()\n\n add = [podcast_data(p, domain) for p in add]\n rem = [p.url for p in rem]\n\n return add, rem, subscriptions\n\n\n def get_episode_changes(self, user, subscriptions, domain, include_actions, since):\n devices = {dev.id.hex: dev.uid for dev in user.client_set.all()}\n\n # index subscribed podcasts by their Id for fast access\n podcasts = {p.get_id(): p for p in subscriptions}\n\n episode_updates = self.get_episode_updates(user, subscriptions, since)\n\n return [self.get_episode_data(status, podcasts, domain,\n include_actions, user, devices) for status in episode_updates]\n\n\n def get_episode_updates(self, user, subscribed_podcasts, since,\n max_per_podcast=5):\n \"\"\" Returns the episode updates since the timestamp \"\"\"\n\n episodes = []\n for podcast in subscribed_podcasts:\n eps = Episode.objects.filter(podcast=podcast,\n released__gt=since)\\\n .order_by('-order', '-released')\n episodes.extend(eps[:max_per_podcast])\n\n states = EpisodeState.dict_for_user(user, episodes)\n\n for episode in episodes:\n yield EpisodeStatus(episode, states.get(episode.id, 'new'), None)\n\n\n def get_episode_data(self, episode_status, podcasts, domain, include_actions, user, devices):\n \"\"\" Get episode data for an episode status object \"\"\"\n\n # TODO: shouldn't the podcast_id be in the episode status?\n podcast_id = episode_status.episode.podcast\n podcast = podcasts.get(podcast_id, None)\n t = episode_data(episode_status.episode, domain, podcast)\n t['status'] = episode_status.status\n\n # include latest action (bug 1419)\n # TODO\n if include_actions and episode_status.action:\n t['action'] = episode_action_json(episode_status.action, user)\n\n return t\n\n def get_since(self, request):\n \"\"\" parses the \"since\" parameter \"\"\"\n since_ = request.GET.get('since', None)\n if since_ is None:\n raise ValueError('parameter since 
missing')\n try:\n return datetime.fromtimestamp(float(since_))\n except ValueError as e:\n raise ValueError(\"'since' is not a valid timestamp: %s\" % str(e))\n", "path": "mygpo/api/advanced/updates.py"}], "after_files": [{"content": "from collections import namedtuple\n\n\nWellKnownSetting = namedtuple('WellKnownSetting', 'name default')\n\n## Well-known settings\n# this should be documented at\n# https://gpoddernet.readthedocs.io/en/latest/api//Settings#Known_Settings\n\n# Flag to allow storing of user-agents\nSTORE_UA = WellKnownSetting('store_user_agent', True)\n\n# Flag to mark a subscription as public\nPUBLIC_SUB_PODCAST = WellKnownSetting('public_subscription', True)\n\n# Default public-flag value (stored in the podcast)\nPUBLIC_SUB_USER = WellKnownSetting('public_subscriptions', True)\n\n# Flattr authentication token, empty if not logged in\nFLATTR_TOKEN = WellKnownSetting('flattr_token', '')\n\n# enable auto-flattring\nFLATTR_AUTO = WellKnownSetting('auto_flattr', False)\n\n# auto-flattr mygpo\nFLATTR_MYGPO = WellKnownSetting('flattr_mygpo', False)\n\n# username for flattr buttons for own content\nFLATTR_USERNAME = WellKnownSetting('flattr_username', '')\n\n# Flag to mark an episode as favorite\nFAV_FLAG = WellKnownSetting('is_favorite', False)\n", "path": "mygpo/users/settings.py"}, {"content": "from itertools import chain\nfrom datetime import datetime\n\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound\nfrom django.contrib.sites.requests import RequestSite\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom mygpo.podcasts.models import Episode\nfrom mygpo.api.httpresponse import JsonResponse\nfrom mygpo.api.advanced import episode_action_json\nfrom mygpo.api.advanced.directory import episode_data, podcast_data\nfrom mygpo.utils import parse_bool, get_timestamp\nfrom mygpo.subscriptions import get_subscription_history, subscription_diff\nfrom mygpo.users.models import Client\nfrom mygpo.episodestates.models import EpisodeState\nfrom mygpo.users.subscriptions import subscription_changes, podcasts_for_states\nfrom mygpo.api.basic_auth import require_valid_user, check_username\nfrom mygpo.decorators import cors_origin\n\nfrom collections import namedtuple\nEpisodeStatus = namedtuple('EpisodeStatus', 'episode status action')\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass DeviceUpdates(View):\n \"\"\" returns various updates for a device\n\n https://gpoddernet.readthedocs.io/en/latest/api//Devices#Get_Updates \"\"\"\n\n @method_decorator(csrf_exempt)\n @method_decorator(require_valid_user)\n @method_decorator(check_username)\n @method_decorator(never_cache)\n @method_decorator(cors_origin())\n def get(self, request, username, device_uid):\n\n now = datetime.utcnow()\n now_ = get_timestamp(now)\n\n user = request.user\n\n try:\n device = user.client_set.get(uid=device_uid)\n except Client.DoesNotExist as e:\n return HttpResponseNotFound(str(e))\n\n try:\n since = self.get_since(request)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n include_actions = parse_bool(request.GET.get('include_actions', False))\n\n domain = RequestSite(request).domain\n\n add, rem, subscriptions = self.get_subscription_changes(user, device,\n since, now,\n domain)\n updates = self.get_episode_changes(user, subscriptions, domain,\n include_actions, since)\n\n return JsonResponse({\n 'add': add,\n 
'rem': rem,\n 'updates': updates,\n 'timestamp': get_timestamp(now),\n })\n\n\n def get_subscription_changes(self, user, device, since, now, domain):\n \"\"\" gets new, removed and current subscriptions \"\"\"\n\n history = get_subscription_history(user, device, since, now)\n add, rem = subscription_diff(history)\n\n subscriptions = device.get_subscribed_podcasts()\n\n add = [podcast_data(p, domain) for p in add]\n rem = [p.url for p in rem]\n\n return add, rem, subscriptions\n\n\n def get_episode_changes(self, user, subscriptions, domain, include_actions, since):\n devices = {dev.id.hex: dev.uid for dev in user.client_set.all()}\n\n # index subscribed podcasts by their Id for fast access\n podcasts = {p.get_id(): p for p in subscriptions}\n\n episode_updates = self.get_episode_updates(user, subscriptions, since)\n\n return [self.get_episode_data(status, podcasts, domain,\n include_actions, user, devices) for status in episode_updates]\n\n\n def get_episode_updates(self, user, subscribed_podcasts, since,\n max_per_podcast=5):\n \"\"\" Returns the episode updates since the timestamp \"\"\"\n\n episodes = []\n for podcast in subscribed_podcasts:\n eps = Episode.objects.filter(podcast=podcast,\n released__gt=since)\\\n .order_by('-order', '-released')\n episodes.extend(eps[:max_per_podcast])\n\n states = EpisodeState.dict_for_user(user, episodes)\n\n for episode in episodes:\n yield EpisodeStatus(episode, states.get(episode.id, 'new'), None)\n\n\n def get_episode_data(self, episode_status, podcasts, domain, include_actions, user, devices):\n \"\"\" Get episode data for an episode status object \"\"\"\n\n # TODO: shouldn't the podcast_id be in the episode status?\n podcast_id = episode_status.episode.podcast\n podcast = podcasts.get(podcast_id, None)\n t = episode_data(episode_status.episode, domain, podcast)\n t['status'] = episode_status.status\n\n # include latest action (bug 1419)\n # TODO\n if include_actions and episode_status.action:\n t['action'] = episode_action_json(episode_status.action, user)\n\n return t\n\n def get_since(self, request):\n \"\"\" parses the \"since\" parameter \"\"\"\n since_ = request.GET.get('since', None)\n if since_ is None:\n raise ValueError('parameter since missing')\n try:\n return datetime.fromtimestamp(float(since_))\n except ValueError as e:\n raise ValueError(\"'since' is not a valid timestamp: %s\" % str(e))\n", "path": "mygpo/api/advanced/updates.py"}]}
| 2,092 | 259 |
gh_patches_debug_9664
|
rasdani/github-patches
|
git_diff
|
conda__conda-build-2271
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set target in package section [feature request]
## Problem
Currently the only way to change the target platform is using the built-in way of handling variants. This involves including an additional file `conda_build_config.yaml`. When you try to set `target` in the package section it is completely ignored, and when you try to set `target` in the outputs section it throws an error. Something like:
```
Expecting win-64 got linux-64
```
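
For reference, the variant-based workaround described above amounts to dropping a `conda_build_config.yaml` next to the recipe that pins `target_platform`. The sketch below is illustrative only: the `recipe/` directory and the pinned value are assumptions, but `target_platform` is the variant key conda-build reads for the build target.

```python
import os

# Illustrative only: write the conda_build_config.yaml that the variant
# machinery picks up, pinning the build target to win-64.
variant_yaml = "target_platform:\n  - win-64\n"

# "recipe/" is a placeholder for whatever directory holds meta.yaml.
os.makedirs("recipe", exist_ok=True)
with open(os.path.join("recipe", "conda_build_config.yaml"), "w") as fh:
    fh.write(variant_yaml)
```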
## Request
Ideally we would like to be able to set target directly in the package section, but our needs could be met as long as that functionality is exposed and not bound to the use of `conda_build_config.yaml`. I took a look at doing this myself but I am unfamiliar with the code base and the reliance on `target_platform` seems to be entrenched in the variants logic and spread across quite a few files.
Please let me know what you think!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/tarcheck.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import json
4 from os.path import basename
5 import re
6 import tarfile
7
8 from conda_build.utils import codec
9
10
11 def dist_fn(fn):
12 if fn.endswith('.tar'):
13 return fn[:-4]
14 elif fn.endswith('.tar.bz2'):
15 return fn[:-8]
16 else:
17 raise Exception('did not expect filename: %r' % fn)
18
19
20 class TarCheck(object):
21 def __init__(self, path, config):
22 self.t = tarfile.open(path)
23 self.paths = set(m.path for m in self.t.getmembers())
24 self.dist = dist_fn(basename(path))
25 self.name, self.version, self.build = self.dist.split('::', 1)[-1].rsplit('-', 2)
26 self.config = config
27
28 def __enter__(self):
29 return self
30
31 def __exit__(self, e_type, e_value, traceback):
32 self.t.close()
33
34 def info_files(self):
35 if re.search('pyh[0-9a-f]{%d}_' % self.config.hash_length, self.build):
36 return
37 lista = [p.strip().decode('utf-8') for p in
38 self.t.extractfile('info/files').readlines()]
39 seta = set(lista)
40 if len(lista) != len(seta):
41 raise Exception('info/files: duplicates')
42
43 listb = [m.path for m in self.t.getmembers()
44 if not (m.path.startswith('info/') or m.isdir())]
45 setb = set(listb)
46 if len(listb) != len(setb):
47 raise Exception('info_files: duplicate members')
48
49 if seta == setb:
50 return
51 for p in sorted(seta | setb):
52 if p not in seta:
53 print('%r not in info/files' % p)
54 if p not in setb:
55 print('%r not in tarball' % p)
56 raise Exception('info/files')
57
58 def index_json(self):
59 info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
60 for varname in 'name', 'version':
61 if info[varname] != getattr(self, varname):
62 raise Exception('%s: %r != %r' % (varname, info[varname],
63 getattr(self, varname)))
64 assert isinstance(info['build_number'], int)
65
66 def prefix_length(self):
67 prefix_length = None
68 if 'info/has_prefix' in self.t.getnames():
69 prefix_files = self.t.extractfile('info/has_prefix').readlines()
70 for line in prefix_files:
71 try:
72 prefix, file_type, _ = line.split()
73 # lines not conforming to the split
74 except ValueError:
75 continue
76 if hasattr(file_type, 'decode'):
77 file_type = file_type.decode(codec)
78 if file_type == 'binary':
79 prefix_length = len(prefix)
80 break
81 return prefix_length
82
83 def correct_subdir(self):
84 info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
85 assert info['subdir'] in [self.config.host_subdir, 'noarch'], \
86 ("Inconsistent subdir in package - index.json expecting {0},"
87 " got {1}".format(self.config.host_subdir, info['subdir']))
88
89
90 def check_all(path, config):
91 x = TarCheck(path, config)
92 x.info_files()
93 x.index_json()
94 x.correct_subdir()
95 x.t.close()
96
97
98 def check_prefix_lengths(files, config):
99 lengths = {}
100 for f in files:
101 length = TarCheck(f, config).prefix_length()
102 if length and length < config.prefix_length:
103 lengths[f] = length
104 return lengths
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda_build/tarcheck.py b/conda_build/tarcheck.py
--- a/conda_build/tarcheck.py
+++ b/conda_build/tarcheck.py
@@ -82,7 +82,7 @@
def correct_subdir(self):
info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
- assert info['subdir'] in [self.config.host_subdir, 'noarch'], \
+ assert info['subdir'] in [self.config.host_subdir, 'noarch', self.config.target_subdir], \
("Inconsistent subdir in package - index.json expecting {0},"
" got {1}".format(self.config.host_subdir, info['subdir']))
|
{"golden_diff": "diff --git a/conda_build/tarcheck.py b/conda_build/tarcheck.py\n--- a/conda_build/tarcheck.py\n+++ b/conda_build/tarcheck.py\n@@ -82,7 +82,7 @@\n \n def correct_subdir(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n- assert info['subdir'] in [self.config.host_subdir, 'noarch'], \\\n+ assert info['subdir'] in [self.config.host_subdir, 'noarch', self.config.target_subdir], \\\n (\"Inconsistent subdir in package - index.json expecting {0},\"\n \" got {1}\".format(self.config.host_subdir, info['subdir']))\n", "issue": "Set target in package section [feature request]\n## Problem\r\n\r\nCurrently the only way to change the target platform is using the built in way of handling variants. This involves including an additional file `conda_build_config.yaml`. When you try to set `target` in the package section it is completely ignored and when you try to set `target` in the outputs section it throws an error. Something like:\r\n```\r\nExpecting win-64 got linux-64\r\n```\r\n\r\n## Request\r\n\r\nIdeally we would like to be able to set target directly in the package section, but our needs could be met as long as that functionality is exposed and not bound to the use of `conda_build_config.yaml`. I took a look at doing this myself but I am unfamiliar with the code base and the reliance on `target_platform` seems to be entrenched in the variants logic and spread across quite a few files.\r\n\r\nPlease let me know what you think!\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport json\nfrom os.path import basename\nimport re\nimport tarfile\n\nfrom conda_build.utils import codec\n\n\ndef dist_fn(fn):\n if fn.endswith('.tar'):\n return fn[:-4]\n elif fn.endswith('.tar.bz2'):\n return fn[:-8]\n else:\n raise Exception('did not expect filename: %r' % fn)\n\n\nclass TarCheck(object):\n def __init__(self, path, config):\n self.t = tarfile.open(path)\n self.paths = set(m.path for m in self.t.getmembers())\n self.dist = dist_fn(basename(path))\n self.name, self.version, self.build = self.dist.split('::', 1)[-1].rsplit('-', 2)\n self.config = config\n\n def __enter__(self):\n return self\n\n def __exit__(self, e_type, e_value, traceback):\n self.t.close()\n\n def info_files(self):\n if re.search('pyh[0-9a-f]{%d}_' % self.config.hash_length, self.build):\n return\n lista = [p.strip().decode('utf-8') for p in\n self.t.extractfile('info/files').readlines()]\n seta = set(lista)\n if len(lista) != len(seta):\n raise Exception('info/files: duplicates')\n\n listb = [m.path for m in self.t.getmembers()\n if not (m.path.startswith('info/') or m.isdir())]\n setb = set(listb)\n if len(listb) != len(setb):\n raise Exception('info_files: duplicate members')\n\n if seta == setb:\n return\n for p in sorted(seta | setb):\n if p not in seta:\n print('%r not in info/files' % p)\n if p not in setb:\n print('%r not in tarball' % p)\n raise Exception('info/files')\n\n def index_json(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n for varname in 'name', 'version':\n if info[varname] != getattr(self, varname):\n raise Exception('%s: %r != %r' % (varname, info[varname],\n getattr(self, varname)))\n assert isinstance(info['build_number'], int)\n\n def prefix_length(self):\n prefix_length = None\n if 'info/has_prefix' in self.t.getnames():\n prefix_files = self.t.extractfile('info/has_prefix').readlines()\n for line in prefix_files:\n try:\n prefix, file_type, _ = line.split()\n 
# lines not conforming to the split\n except ValueError:\n continue\n if hasattr(file_type, 'decode'):\n file_type = file_type.decode(codec)\n if file_type == 'binary':\n prefix_length = len(prefix)\n break\n return prefix_length\n\n def correct_subdir(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n assert info['subdir'] in [self.config.host_subdir, 'noarch'], \\\n (\"Inconsistent subdir in package - index.json expecting {0},\"\n \" got {1}\".format(self.config.host_subdir, info['subdir']))\n\n\ndef check_all(path, config):\n x = TarCheck(path, config)\n x.info_files()\n x.index_json()\n x.correct_subdir()\n x.t.close()\n\n\ndef check_prefix_lengths(files, config):\n lengths = {}\n for f in files:\n length = TarCheck(f, config).prefix_length()\n if length and length < config.prefix_length:\n lengths[f] = length\n return lengths\n", "path": "conda_build/tarcheck.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport json\nfrom os.path import basename\nimport re\nimport tarfile\n\nfrom conda_build.utils import codec\n\n\ndef dist_fn(fn):\n if fn.endswith('.tar'):\n return fn[:-4]\n elif fn.endswith('.tar.bz2'):\n return fn[:-8]\n else:\n raise Exception('did not expect filename: %r' % fn)\n\n\nclass TarCheck(object):\n def __init__(self, path, config):\n self.t = tarfile.open(path)\n self.paths = set(m.path for m in self.t.getmembers())\n self.dist = dist_fn(basename(path))\n self.name, self.version, self.build = self.dist.split('::', 1)[-1].rsplit('-', 2)\n self.config = config\n\n def __enter__(self):\n return self\n\n def __exit__(self, e_type, e_value, traceback):\n self.t.close()\n\n def info_files(self):\n if re.search('pyh[0-9a-f]{%d}_' % self.config.hash_length, self.build):\n return\n lista = [p.strip().decode('utf-8') for p in\n self.t.extractfile('info/files').readlines()]\n seta = set(lista)\n if len(lista) != len(seta):\n raise Exception('info/files: duplicates')\n\n listb = [m.path for m in self.t.getmembers()\n if not (m.path.startswith('info/') or m.isdir())]\n setb = set(listb)\n if len(listb) != len(setb):\n raise Exception('info_files: duplicate members')\n\n if seta == setb:\n return\n for p in sorted(seta | setb):\n if p not in seta:\n print('%r not in info/files' % p)\n if p not in setb:\n print('%r not in tarball' % p)\n raise Exception('info/files')\n\n def index_json(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n for varname in 'name', 'version':\n if info[varname] != getattr(self, varname):\n raise Exception('%s: %r != %r' % (varname, info[varname],\n getattr(self, varname)))\n assert isinstance(info['build_number'], int)\n\n def prefix_length(self):\n prefix_length = None\n if 'info/has_prefix' in self.t.getnames():\n prefix_files = self.t.extractfile('info/has_prefix').readlines()\n for line in prefix_files:\n try:\n prefix, file_type, _ = line.split()\n # lines not conforming to the split\n except ValueError:\n continue\n if hasattr(file_type, 'decode'):\n file_type = file_type.decode(codec)\n if file_type == 'binary':\n prefix_length = len(prefix)\n break\n return prefix_length\n\n def correct_subdir(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n assert info['subdir'] in [self.config.host_subdir, 'noarch', self.config.target_subdir], \\\n (\"Inconsistent subdir in package - index.json expecting {0},\"\n \" got {1}\".format(self.config.host_subdir, info['subdir']))\n\n\ndef 
check_all(path, config):\n x = TarCheck(path, config)\n x.info_files()\n x.index_json()\n x.correct_subdir()\n x.t.close()\n\n\ndef check_prefix_lengths(files, config):\n lengths = {}\n for f in files:\n length = TarCheck(f, config).prefix_length()\n if length and length < config.prefix_length:\n lengths[f] = length\n return lengths\n", "path": "conda_build/tarcheck.py"}]}
| 1,480 | 167 |
gh_patches_debug_24890
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-12915
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clearing the ordering of a table does not clear the `ordering` key from the UserConfig data
### NetBox version
v3.5.3
### Python version
3.8
### Steps to Reproduce
1. Open the sites list and order the entries by name.
2. In a shell, retrieve the UserConfig instance for your user and inspect the entry for `tables.SiteTable.ordering`:
```
>>> uc=UserConfig.objects.get(user__username='admin')
>>> uc.data['tables']['SiteTable']['ordering']
['name']
```
3. In the UI, clear the applied ordering by clicking the X in the column header.
4. Refresh and re-inspect the UserConfig data:
```
>>> uc.refresh_from_db()
>>> uc.data['tables']['SiteTable']['ordering']
['']
```
### Expected Behavior
The `ordering` key should be removed from the data, as there is no longer any preference stored.
### Observed Behavior
`ordering` is set to a list containing an empty string. This does not affect any breaking behavior AFAICT; however, it should be cleaned up as it can pose complications.
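
The stray empty string comes straight from how Django parses the cleared-ordering request. A standalone sketch (not NetBox code; `sort` is the default django-tables2 order-by parameter that `prefixed_order_by_field` resolves to when no table prefix is set) of what the view receives after clicking the X:

```python
from django.conf import settings

# QueryDict needs settings configured; a bare configure() is enough for this demo.
settings.configure()

from django.http import QueryDict

cleared = QueryDict("sort=")     # the URL ends in "?sort=" once the ordering is cleared
print(cleared.getlist("sort"))   # [''] -> exactly what gets saved as the preference
print(bool(cleared["sort"]))     # False -> the emptiness the patched code branches on
```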
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/tables/tables.py`
Content:
```
1 import django_tables2 as tables
2 from django.contrib.auth.models import AnonymousUser
3 from django.contrib.contenttypes.fields import GenericForeignKey
4 from django.contrib.contenttypes.models import ContentType
5 from django.core.exceptions import FieldDoesNotExist
6 from django.db.models.fields.related import RelatedField
7 from django.urls import reverse
8 from django.urls.exceptions import NoReverseMatch
9 from django.utils.safestring import mark_safe
10 from django.utils.translation import gettext as _
11 from django_tables2.data import TableQuerysetData
12
13 from extras.models import CustomField, CustomLink
14 from extras.choices import CustomFieldVisibilityChoices
15 from netbox.tables import columns
16 from utilities.paginator import EnhancedPaginator, get_paginate_count
17 from utilities.utils import get_viewname, highlight_string, title
18
19 __all__ = (
20 'BaseTable',
21 'NetBoxTable',
22 'SearchTable',
23 )
24
25
26 class BaseTable(tables.Table):
27 """
28 Base table class for NetBox objects. Adds support for:
29
30 * User configuration (column preferences)
31 * Automatic prefetching of related objects
32 * BS5 styling
33
34 :param user: Personalize table display for the given user (optional). Has no effect if AnonymousUser is passed.
35 """
36 exempt_columns = ()
37
38 class Meta:
39 attrs = {
40 'class': 'table table-hover object-list',
41 }
42
43 def __init__(self, *args, user=None, **kwargs):
44
45 super().__init__(*args, **kwargs)
46
47 # Set default empty_text if none was provided
48 if self.empty_text is None:
49 self.empty_text = f"No {self._meta.model._meta.verbose_name_plural} found"
50
51 # Determine the table columns to display by checking the following:
52 # 1. User's configuration for the table
53 # 2. Meta.default_columns
54 # 3. Meta.fields
55 selected_columns = None
56 if user is not None and not isinstance(user, AnonymousUser):
57 selected_columns = user.config.get(f"tables.{self.__class__.__name__}.columns")
58 if not selected_columns:
59 selected_columns = getattr(self.Meta, 'default_columns', self.Meta.fields)
60
61 # Hide non-selected columns which are not exempt
62 for column in self.columns:
63 if column.name not in [*selected_columns, *self.exempt_columns]:
64 self.columns.hide(column.name)
65
66 # Rearrange the sequence to list selected columns first, followed by all remaining columns
67 # TODO: There's probably a more clever way to accomplish this
68 self.sequence = [
69 *[c for c in selected_columns if c in self.columns.names()],
70 *[c for c in self.columns.names() if c not in selected_columns]
71 ]
72
73 # PK column should always come first
74 if 'pk' in self.sequence:
75 self.sequence.remove('pk')
76 self.sequence.insert(0, 'pk')
77
78 # Actions column should always come last
79 if 'actions' in self.sequence:
80 self.sequence.remove('actions')
81 self.sequence.append('actions')
82
83 # Dynamically update the table's QuerySet to ensure related fields are pre-fetched
84 if isinstance(self.data, TableQuerysetData):
85
86 prefetch_fields = []
87 for column in self.columns:
88 if column.visible:
89 model = getattr(self.Meta, 'model')
90 accessor = column.accessor
91 prefetch_path = []
92 for field_name in accessor.split(accessor.SEPARATOR):
93 try:
94 field = model._meta.get_field(field_name)
95 except FieldDoesNotExist:
96 break
97 if isinstance(field, RelatedField):
98 # Follow ForeignKeys to the related model
99 prefetch_path.append(field_name)
100 model = field.remote_field.model
101 elif isinstance(field, GenericForeignKey):
102 # Can't prefetch beyond a GenericForeignKey
103 prefetch_path.append(field_name)
104 break
105 if prefetch_path:
106 prefetch_fields.append('__'.join(prefetch_path))
107 self.data.data = self.data.data.prefetch_related(*prefetch_fields)
108
109 def _get_columns(self, visible=True):
110 columns = []
111 for name, column in self.columns.items():
112 if column.visible == visible and name not in self.exempt_columns:
113 columns.append((name, column.verbose_name))
114 return columns
115
116 @property
117 def available_columns(self):
118 return self._get_columns(visible=False)
119
120 @property
121 def selected_columns(self):
122 return self._get_columns(visible=True)
123
124 @property
125 def objects_count(self):
126 """
127 Return the total number of real objects represented by the Table. This is useful when dealing with
128 prefixes/IP addresses/etc., where some table rows may represent available address space.
129 """
130 if not hasattr(self, '_objects_count'):
131 self._objects_count = sum(1 for obj in self.data if hasattr(obj, 'pk'))
132 return self._objects_count
133
134 def configure(self, request):
135 """
136 Configure the table for a specific request context. This performs pagination and records
137 the user's preferred ordering logic.
138 """
139 # Save ordering preference
140 if request.user.is_authenticated:
141 table_name = self.__class__.__name__
142 if self.prefixed_order_by_field in request.GET:
143 # If an ordering has been specified as a query parameter, save it as the
144 # user's preferred ordering for this table.
145 ordering = request.GET.getlist(self.prefixed_order_by_field)
146 request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
147 elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):
148 # If no ordering has been specified, set the preferred ordering (if any).
149 self.order_by = ordering
150
151 # Paginate the table results
152 paginate = {
153 'paginator_class': EnhancedPaginator,
154 'per_page': get_paginate_count(request)
155 }
156 tables.RequestConfig(request, paginate).configure(self)
157
158
159 class NetBoxTable(BaseTable):
160 """
161 Table class for most NetBox objects. Adds support for custom field & custom link columns. Includes
162 default columns for:
163
164 * PK (row selection)
165 * ID
166 * Actions
167 """
168 pk = columns.ToggleColumn(
169 visible=False
170 )
171 id = tables.Column(
172 linkify=True,
173 verbose_name='ID'
174 )
175 actions = columns.ActionsColumn()
176
177 exempt_columns = ('pk', 'actions')
178
179 class Meta(BaseTable.Meta):
180 pass
181
182 def __init__(self, *args, extra_columns=None, **kwargs):
183 if extra_columns is None:
184 extra_columns = []
185
186 # Add custom field & custom link columns
187 content_type = ContentType.objects.get_for_model(self._meta.model)
188 custom_fields = CustomField.objects.filter(
189 content_types=content_type
190 ).exclude(ui_visibility=CustomFieldVisibilityChoices.VISIBILITY_HIDDEN)
191
192 extra_columns.extend([
193 (f'cf_{cf.name}', columns.CustomFieldColumn(cf)) for cf in custom_fields
194 ])
195 custom_links = CustomLink.objects.filter(content_types=content_type, enabled=True)
196 extra_columns.extend([
197 (f'cl_{cl.name}', columns.CustomLinkColumn(cl)) for cl in custom_links
198 ])
199
200 super().__init__(*args, extra_columns=extra_columns, **kwargs)
201
202 @property
203 def htmx_url(self):
204 """
205 Return the base HTML request URL for embedded tables.
206 """
207 if getattr(self, 'embedded', False):
208 viewname = get_viewname(self._meta.model, action='list')
209 try:
210 return reverse(viewname)
211 except NoReverseMatch:
212 pass
213 return ''
214
215
216 class SearchTable(tables.Table):
217 object_type = columns.ContentTypeColumn(
218 verbose_name=_('Type'),
219 order_by="object___meta__verbose_name",
220 )
221 object = tables.Column(
222 linkify=True,
223 order_by=('name', )
224 )
225 field = tables.Column()
226 value = tables.Column()
227
228 trim_length = 30
229
230 class Meta:
231 attrs = {
232 'class': 'table table-hover object-list',
233 }
234 empty_text = _('No results found')
235
236 def __init__(self, data, highlight=None, **kwargs):
237 self.highlight = highlight
238 super().__init__(data, **kwargs)
239
240 def render_field(self, value, record):
241 if hasattr(record.object, value):
242 return title(record.object._meta.get_field(value).verbose_name)
243 return value
244
245 def render_value(self, value):
246 if not self.highlight:
247 return value
248
249 value = highlight_string(value, self.highlight, trim_pre=self.trim_length, trim_post=self.trim_length)
250
251 return mark_safe(value)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/tables/tables.py b/netbox/netbox/tables/tables.py
--- a/netbox/netbox/tables/tables.py
+++ b/netbox/netbox/tables/tables.py
@@ -140,10 +140,14 @@
if request.user.is_authenticated:
table_name = self.__class__.__name__
if self.prefixed_order_by_field in request.GET:
- # If an ordering has been specified as a query parameter, save it as the
- # user's preferred ordering for this table.
- ordering = request.GET.getlist(self.prefixed_order_by_field)
- request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
+ if request.GET[self.prefixed_order_by_field]:
+ # If an ordering has been specified as a query parameter, save it as the
+ # user's preferred ordering for this table.
+ ordering = request.GET.getlist(self.prefixed_order_by_field)
+ request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
+ else:
+ # If the ordering has been set to none (empty), clear any existing preference.
+ request.user.config.clear(f'tables.{table_name}.ordering', commit=True)
elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):
# If no ordering has been specified, set the preferred ordering (if any).
self.order_by = ordering
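
With this change, repeating the shell check from the issue (reusing the `uc` handle from the reproduction steps) should show the preference removed rather than stored as `['']`. This is a sketch of the expected follow-up, assuming `config.clear()` drops the key and the table's other preferences remain:

```python
>>> uc.refresh_from_db()
>>> uc.data['tables']['SiteTable'].get('ordering')  # returns None: the key is gone
```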
|
{"golden_diff": "diff --git a/netbox/netbox/tables/tables.py b/netbox/netbox/tables/tables.py\n--- a/netbox/netbox/tables/tables.py\n+++ b/netbox/netbox/tables/tables.py\n@@ -140,10 +140,14 @@\n if request.user.is_authenticated:\n table_name = self.__class__.__name__\n if self.prefixed_order_by_field in request.GET:\n- # If an ordering has been specified as a query parameter, save it as the\n- # user's preferred ordering for this table.\n- ordering = request.GET.getlist(self.prefixed_order_by_field)\n- request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)\n+ if request.GET[self.prefixed_order_by_field]:\n+ # If an ordering has been specified as a query parameter, save it as the\n+ # user's preferred ordering for this table.\n+ ordering = request.GET.getlist(self.prefixed_order_by_field)\n+ request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)\n+ else:\n+ # If the ordering has been set to none (empty), clear any existing preference.\n+ request.user.config.clear(f'tables.{table_name}.ordering', commit=True)\n elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):\n # If no ordering has been specified, set the preferred ordering (if any).\n self.order_by = ordering\n", "issue": "Clearing the ordering of a table does not clear the `ordering` key from the UserConfig data\n### NetBox version\n\nv3.5.3\n\n### Python version\n\n3.8\n\n### Steps to Reproduce\n\n1. Open the sites list and order the entries by name.\r\n2. In a shell, retrieve the UserConfig instance for your user and inspect the entry for `tables.SiteTable.ordering`:\r\n\r\n```\r\n>>> uc=UserConfig.objects.get(user__username='admin')\r\n>>> uc.data['tables']['SiteTable']['ordering']\r\n['name']\r\n```\r\n\r\n3. In the UI, clear the applied ordering by clicking the X in the column header.\r\n4. Refresh and re-inspect the UserConfig data:\r\n\r\n```\r\n>>> uc.refresh_from_db()\r\n>>> uc.data['tables']['SiteTable']['ordering']\r\n['']\r\n```\n\n### Expected Behavior\n\nThe `ordering` key should be removed from the data, as there is no longer any preference stored.\n\n### Observed Behavior\n\n`ordering` is set to a list containing an empty string. This does not effect any breaking behavior AFAICT, however it should be cleaned up as it can pose complications.\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import FieldDoesNotExist\nfrom django.db.models.fields.related import RelatedField\nfrom django.urls import reverse\nfrom django.urls.exceptions import NoReverseMatch\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\nfrom django_tables2.data import TableQuerysetData\n\nfrom extras.models import CustomField, CustomLink\nfrom extras.choices import CustomFieldVisibilityChoices\nfrom netbox.tables import columns\nfrom utilities.paginator import EnhancedPaginator, get_paginate_count\nfrom utilities.utils import get_viewname, highlight_string, title\n\n__all__ = (\n 'BaseTable',\n 'NetBoxTable',\n 'SearchTable',\n)\n\n\nclass BaseTable(tables.Table):\n \"\"\"\n Base table class for NetBox objects. Adds support for:\n\n * User configuration (column preferences)\n * Automatic prefetching of related objects\n * BS5 styling\n\n :param user: Personalize table display for the given user (optional). 
Has no effect if AnonymousUser is passed.\n \"\"\"\n exempt_columns = ()\n\n class Meta:\n attrs = {\n 'class': 'table table-hover object-list',\n }\n\n def __init__(self, *args, user=None, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Set default empty_text if none was provided\n if self.empty_text is None:\n self.empty_text = f\"No {self._meta.model._meta.verbose_name_plural} found\"\n\n # Determine the table columns to display by checking the following:\n # 1. User's configuration for the table\n # 2. Meta.default_columns\n # 3. Meta.fields\n selected_columns = None\n if user is not None and not isinstance(user, AnonymousUser):\n selected_columns = user.config.get(f\"tables.{self.__class__.__name__}.columns\")\n if not selected_columns:\n selected_columns = getattr(self.Meta, 'default_columns', self.Meta.fields)\n\n # Hide non-selected columns which are not exempt\n for column in self.columns:\n if column.name not in [*selected_columns, *self.exempt_columns]:\n self.columns.hide(column.name)\n\n # Rearrange the sequence to list selected columns first, followed by all remaining columns\n # TODO: There's probably a more clever way to accomplish this\n self.sequence = [\n *[c for c in selected_columns if c in self.columns.names()],\n *[c for c in self.columns.names() if c not in selected_columns]\n ]\n\n # PK column should always come first\n if 'pk' in self.sequence:\n self.sequence.remove('pk')\n self.sequence.insert(0, 'pk')\n\n # Actions column should always come last\n if 'actions' in self.sequence:\n self.sequence.remove('actions')\n self.sequence.append('actions')\n\n # Dynamically update the table's QuerySet to ensure related fields are pre-fetched\n if isinstance(self.data, TableQuerysetData):\n\n prefetch_fields = []\n for column in self.columns:\n if column.visible:\n model = getattr(self.Meta, 'model')\n accessor = column.accessor\n prefetch_path = []\n for field_name in accessor.split(accessor.SEPARATOR):\n try:\n field = model._meta.get_field(field_name)\n except FieldDoesNotExist:\n break\n if isinstance(field, RelatedField):\n # Follow ForeignKeys to the related model\n prefetch_path.append(field_name)\n model = field.remote_field.model\n elif isinstance(field, GenericForeignKey):\n # Can't prefetch beyond a GenericForeignKey\n prefetch_path.append(field_name)\n break\n if prefetch_path:\n prefetch_fields.append('__'.join(prefetch_path))\n self.data.data = self.data.data.prefetch_related(*prefetch_fields)\n\n def _get_columns(self, visible=True):\n columns = []\n for name, column in self.columns.items():\n if column.visible == visible and name not in self.exempt_columns:\n columns.append((name, column.verbose_name))\n return columns\n\n @property\n def available_columns(self):\n return self._get_columns(visible=False)\n\n @property\n def selected_columns(self):\n return self._get_columns(visible=True)\n\n @property\n def objects_count(self):\n \"\"\"\n Return the total number of real objects represented by the Table. This is useful when dealing with\n prefixes/IP addresses/etc., where some table rows may represent available address space.\n \"\"\"\n if not hasattr(self, '_objects_count'):\n self._objects_count = sum(1 for obj in self.data if hasattr(obj, 'pk'))\n return self._objects_count\n\n def configure(self, request):\n \"\"\"\n Configure the table for a specific request context. 
This performs pagination and records\n the user's preferred ordering logic.\n \"\"\"\n # Save ordering preference\n if request.user.is_authenticated:\n table_name = self.__class__.__name__\n if self.prefixed_order_by_field in request.GET:\n # If an ordering has been specified as a query parameter, save it as the\n # user's preferred ordering for this table.\n ordering = request.GET.getlist(self.prefixed_order_by_field)\n request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)\n elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):\n # If no ordering has been specified, set the preferred ordering (if any).\n self.order_by = ordering\n\n # Paginate the table results\n paginate = {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }\n tables.RequestConfig(request, paginate).configure(self)\n\n\nclass NetBoxTable(BaseTable):\n \"\"\"\n Table class for most NetBox objects. Adds support for custom field & custom link columns. Includes\n default columns for:\n\n * PK (row selection)\n * ID\n * Actions\n \"\"\"\n pk = columns.ToggleColumn(\n visible=False\n )\n id = tables.Column(\n linkify=True,\n verbose_name='ID'\n )\n actions = columns.ActionsColumn()\n\n exempt_columns = ('pk', 'actions')\n\n class Meta(BaseTable.Meta):\n pass\n\n def __init__(self, *args, extra_columns=None, **kwargs):\n if extra_columns is None:\n extra_columns = []\n\n # Add custom field & custom link columns\n content_type = ContentType.objects.get_for_model(self._meta.model)\n custom_fields = CustomField.objects.filter(\n content_types=content_type\n ).exclude(ui_visibility=CustomFieldVisibilityChoices.VISIBILITY_HIDDEN)\n\n extra_columns.extend([\n (f'cf_{cf.name}', columns.CustomFieldColumn(cf)) for cf in custom_fields\n ])\n custom_links = CustomLink.objects.filter(content_types=content_type, enabled=True)\n extra_columns.extend([\n (f'cl_{cl.name}', columns.CustomLinkColumn(cl)) for cl in custom_links\n ])\n\n super().__init__(*args, extra_columns=extra_columns, **kwargs)\n\n @property\n def htmx_url(self):\n \"\"\"\n Return the base HTML request URL for embedded tables.\n \"\"\"\n if getattr(self, 'embedded', False):\n viewname = get_viewname(self._meta.model, action='list')\n try:\n return reverse(viewname)\n except NoReverseMatch:\n pass\n return ''\n\n\nclass SearchTable(tables.Table):\n object_type = columns.ContentTypeColumn(\n verbose_name=_('Type'),\n order_by=\"object___meta__verbose_name\",\n )\n object = tables.Column(\n linkify=True,\n order_by=('name', )\n )\n field = tables.Column()\n value = tables.Column()\n\n trim_length = 30\n\n class Meta:\n attrs = {\n 'class': 'table table-hover object-list',\n }\n empty_text = _('No results found')\n\n def __init__(self, data, highlight=None, **kwargs):\n self.highlight = highlight\n super().__init__(data, **kwargs)\n\n def render_field(self, value, record):\n if hasattr(record.object, value):\n return title(record.object._meta.get_field(value).verbose_name)\n return value\n\n def render_value(self, value):\n if not self.highlight:\n return value\n\n value = highlight_string(value, self.highlight, trim_pre=self.trim_length, trim_post=self.trim_length)\n\n return mark_safe(value)\n", "path": "netbox/netbox/tables/tables.py"}], "after_files": [{"content": "import django_tables2 as tables\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom 
django.core.exceptions import FieldDoesNotExist\nfrom django.db.models.fields.related import RelatedField\nfrom django.urls import reverse\nfrom django.urls.exceptions import NoReverseMatch\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\nfrom django_tables2.data import TableQuerysetData\n\nfrom extras.models import CustomField, CustomLink\nfrom extras.choices import CustomFieldVisibilityChoices\nfrom netbox.tables import columns\nfrom utilities.paginator import EnhancedPaginator, get_paginate_count\nfrom utilities.utils import get_viewname, highlight_string, title\n\n__all__ = (\n 'BaseTable',\n 'NetBoxTable',\n 'SearchTable',\n)\n\n\nclass BaseTable(tables.Table):\n \"\"\"\n Base table class for NetBox objects. Adds support for:\n\n * User configuration (column preferences)\n * Automatic prefetching of related objects\n * BS5 styling\n\n :param user: Personalize table display for the given user (optional). Has no effect if AnonymousUser is passed.\n \"\"\"\n exempt_columns = ()\n\n class Meta:\n attrs = {\n 'class': 'table table-hover object-list',\n }\n\n def __init__(self, *args, user=None, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Set default empty_text if none was provided\n if self.empty_text is None:\n self.empty_text = f\"No {self._meta.model._meta.verbose_name_plural} found\"\n\n # Determine the table columns to display by checking the following:\n # 1. User's configuration for the table\n # 2. Meta.default_columns\n # 3. Meta.fields\n selected_columns = None\n if user is not None and not isinstance(user, AnonymousUser):\n selected_columns = user.config.get(f\"tables.{self.__class__.__name__}.columns\")\n if not selected_columns:\n selected_columns = getattr(self.Meta, 'default_columns', self.Meta.fields)\n\n # Hide non-selected columns which are not exempt\n for column in self.columns:\n if column.name not in [*selected_columns, *self.exempt_columns]:\n self.columns.hide(column.name)\n\n # Rearrange the sequence to list selected columns first, followed by all remaining columns\n # TODO: There's probably a more clever way to accomplish this\n self.sequence = [\n *[c for c in selected_columns if c in self.columns.names()],\n *[c for c in self.columns.names() if c not in selected_columns]\n ]\n\n # PK column should always come first\n if 'pk' in self.sequence:\n self.sequence.remove('pk')\n self.sequence.insert(0, 'pk')\n\n # Actions column should always come last\n if 'actions' in self.sequence:\n self.sequence.remove('actions')\n self.sequence.append('actions')\n\n # Dynamically update the table's QuerySet to ensure related fields are pre-fetched\n if isinstance(self.data, TableQuerysetData):\n\n prefetch_fields = []\n for column in self.columns:\n if column.visible:\n model = getattr(self.Meta, 'model')\n accessor = column.accessor\n prefetch_path = []\n for field_name in accessor.split(accessor.SEPARATOR):\n try:\n field = model._meta.get_field(field_name)\n except FieldDoesNotExist:\n break\n if isinstance(field, RelatedField):\n # Follow ForeignKeys to the related model\n prefetch_path.append(field_name)\n model = field.remote_field.model\n elif isinstance(field, GenericForeignKey):\n # Can't prefetch beyond a GenericForeignKey\n prefetch_path.append(field_name)\n break\n if prefetch_path:\n prefetch_fields.append('__'.join(prefetch_path))\n self.data.data = self.data.data.prefetch_related(*prefetch_fields)\n\n def _get_columns(self, visible=True):\n columns = []\n for name, column in self.columns.items():\n if 
column.visible == visible and name not in self.exempt_columns:\n columns.append((name, column.verbose_name))\n return columns\n\n @property\n def available_columns(self):\n return self._get_columns(visible=False)\n\n @property\n def selected_columns(self):\n return self._get_columns(visible=True)\n\n @property\n def objects_count(self):\n \"\"\"\n Return the total number of real objects represented by the Table. This is useful when dealing with\n prefixes/IP addresses/etc., where some table rows may represent available address space.\n \"\"\"\n if not hasattr(self, '_objects_count'):\n self._objects_count = sum(1 for obj in self.data if hasattr(obj, 'pk'))\n return self._objects_count\n\n def configure(self, request):\n \"\"\"\n Configure the table for a specific request context. This performs pagination and records\n the user's preferred ordering logic.\n \"\"\"\n # Save ordering preference\n if request.user.is_authenticated:\n table_name = self.__class__.__name__\n if self.prefixed_order_by_field in request.GET:\n if request.GET[self.prefixed_order_by_field]:\n # If an ordering has been specified as a query parameter, save it as the\n # user's preferred ordering for this table.\n ordering = request.GET.getlist(self.prefixed_order_by_field)\n request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)\n else:\n # If the ordering has been set to none (empty), clear any existing preference.\n request.user.config.clear(f'tables.{table_name}.ordering', commit=True)\n elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):\n # If no ordering has been specified, set the preferred ordering (if any).\n self.order_by = ordering\n\n # Paginate the table results\n paginate = {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }\n tables.RequestConfig(request, paginate).configure(self)\n\n\nclass NetBoxTable(BaseTable):\n \"\"\"\n Table class for most NetBox objects. Adds support for custom field & custom link columns. 
Includes\n default columns for:\n\n * PK (row selection)\n * ID\n * Actions\n \"\"\"\n pk = columns.ToggleColumn(\n visible=False\n )\n id = tables.Column(\n linkify=True,\n verbose_name='ID'\n )\n actions = columns.ActionsColumn()\n\n exempt_columns = ('pk', 'actions')\n\n class Meta(BaseTable.Meta):\n pass\n\n def __init__(self, *args, extra_columns=None, **kwargs):\n if extra_columns is None:\n extra_columns = []\n\n # Add custom field & custom link columns\n content_type = ContentType.objects.get_for_model(self._meta.model)\n custom_fields = CustomField.objects.filter(\n content_types=content_type\n ).exclude(ui_visibility=CustomFieldVisibilityChoices.VISIBILITY_HIDDEN)\n\n extra_columns.extend([\n (f'cf_{cf.name}', columns.CustomFieldColumn(cf)) for cf in custom_fields\n ])\n custom_links = CustomLink.objects.filter(content_types=content_type, enabled=True)\n extra_columns.extend([\n (f'cl_{cl.name}', columns.CustomLinkColumn(cl)) for cl in custom_links\n ])\n\n super().__init__(*args, extra_columns=extra_columns, **kwargs)\n\n @property\n def htmx_url(self):\n \"\"\"\n Return the base HTML request URL for embedded tables.\n \"\"\"\n if getattr(self, 'embedded', False):\n viewname = get_viewname(self._meta.model, action='list')\n try:\n return reverse(viewname)\n except NoReverseMatch:\n pass\n return ''\n\n\nclass SearchTable(tables.Table):\n object_type = columns.ContentTypeColumn(\n verbose_name=_('Type'),\n order_by=\"object___meta__verbose_name\",\n )\n object = tables.Column(\n linkify=True,\n order_by=('name', )\n )\n field = tables.Column()\n value = tables.Column()\n\n trim_length = 30\n\n class Meta:\n attrs = {\n 'class': 'table table-hover object-list',\n }\n empty_text = _('No results found')\n\n def __init__(self, data, highlight=None, **kwargs):\n self.highlight = highlight\n super().__init__(data, **kwargs)\n\n def render_field(self, value, record):\n if hasattr(record.object, value):\n return title(record.object._meta.get_field(value).verbose_name)\n return value\n\n def render_value(self, value):\n if not self.highlight:\n return value\n\n value = highlight_string(value, self.highlight, trim_pre=self.trim_length, trim_post=self.trim_length)\n\n return mark_safe(value)\n", "path": "netbox/netbox/tables/tables.py"}]}
| 2,978 | 317 |
gh_patches_debug_8262
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2294
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't set registration information fields from admin (for non-member registrations)
### Describe the bug
It is not possible to manually set the event registration information fields from the admin. This is quite problematic when adding non-member's registrations
### How to reproduce
Steps to reproduce the behaviour:
1. add a manual event registration
2. try to edit the information fields (this works)
3. you cannot save it
### Expected behaviour
It should save
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/admin/views.py`
Content:
```
1 import csv
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.admin import helpers
6 from django.contrib.admin.views.decorators import staff_member_required
7 from django.contrib.auth.mixins import PermissionRequiredMixin
8 from django.http import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils import timezone
11 from django.utils.decorators import method_decorator
12 from django.utils.text import slugify
13 from django.utils.translation import pgettext_lazy
14 from django.utils.translation import gettext_lazy as _
15 from django.views import View
16 from django.views.generic import DetailView, FormView
17
18 from events import services
19 from events.decorators import organiser_only
20 from events.exceptions import RegistrationError
21 from events.forms import FieldsForm, EventMessageForm
22 from payments.models import Payment
23 from pushnotifications.models import Message, Category
24 from events.models import Event, EventRegistration
25
26
27 @method_decorator(staff_member_required, name="dispatch")
28 @method_decorator(organiser_only, name="dispatch")
29 class EventAdminDetails(DetailView, PermissionRequiredMixin):
30 """Render an overview of registrations for the specified event."""
31
32 template_name = "events/admin/details.html"
33 model = Event
34 context_object_name = "event"
35 permission_required = "events.change_event"
36
37 def get_context_data(self, **kwargs):
38 context = super().get_context_data(**kwargs)
39
40 context.update({"payment": Payment, "has_permission": True, "site_url": "/"})
41
42 return context
43
44
45 @method_decorator(staff_member_required, name="dispatch")
46 @method_decorator(organiser_only, name="dispatch")
47 class RegistrationAdminFields(FormView):
48 """Render a form that allows the user to change the details of their registration.
49
50 The user should be authenticated.
51 """
52
53 form_class = FieldsForm
54 template_name = "admin/change_form.html"
55 registration = None
56 admin = None
57
58 def get_context_data(self, **kwargs):
59 context = super().get_context_data(**kwargs)
60 context.update(
61 {
62 **self.admin.admin_site.each_context(self.request),
63 "add": False,
64 "change": True,
65 "has_view_permission": True,
66 "has_add_permission": False,
67 "has_change_permission": self.request.user.has_perms(
68 "events.change_eventregistration"
69 ),
70 "has_delete_permission": False,
71 "has_editable_inline_admin_formsets": False,
72 "app_label": "events",
73 "opts": self.registration._meta,
74 "is_popup": False,
75 "save_as": False,
76 "save_on_top": False,
77 "original": self.registration,
78 "obj_id": self.registration.pk,
79 "title": _("Change registration fields"),
80 "adminform": helpers.AdminForm(
81 context["form"],
82 ((None, {"fields": context["form"].fields.keys()}),),
83 {},
84 ),
85 }
86 )
87 return context
88
89 def get_form_kwargs(self):
90 kwargs = super().get_form_kwargs()
91 kwargs["fields"] = services.registration_fields(
92 self.request, registration=self.registration
93 )
94 return kwargs
95
96 def form_valid(self, form):
97 values = form.field_values()
98 try:
99 services.update_registration(
100 registration=self.registration, field_values=values
101 )
102 messages.success(self.request, _("Registration successfully saved."))
103 if "_save" in self.request.POST:
104 return redirect(
105 "admin:events_eventregistration_change", self.registration.pk
106 )
107 except RegistrationError as e:
108 messages.error(self.request, e)
109 return self.render_to_response(self.get_context_data(form=form))
110
111 def dispatch(self, request, *args, **kwargs):
112 self.registration = get_object_or_404(
113 EventRegistration, pk=self.kwargs["registration"]
114 )
115 try:
116 if self.registration.event.has_fields:
117 return super().dispatch(request, *args, **kwargs)
118 except RegistrationError:
119 pass
120 return redirect("admin:events_eventregistration_change", self.registration.pk)
121
122
123 @method_decorator(staff_member_required, name="dispatch")
124 @method_decorator(organiser_only, name="dispatch")
125 class EventMessage(FormView):
126 """Renders a form that allows the user to create a push notification for all users registers to the event."""
127
128 form_class = EventMessageForm
129 template_name = "events/admin/message_form.html"
130 admin = None
131 event = None
132
133 def get_context_data(self, **kwargs):
134 context = super().get_context_data(**kwargs)
135 context.update(
136 {
137 **self.admin.admin_site.each_context(self.request),
138 "add": False,
139 "change": True,
140 "has_view_permission": True,
141 "has_add_permission": False,
142 "has_change_permission": self.request.user.has_perms(
143 "events.change_event"
144 ),
145 "has_delete_permission": False,
146 "has_editable_inline_admin_formsets": False,
147 "app_label": "events",
148 "opts": self.event._meta,
149 "is_popup": False,
150 "save_as": False,
151 "save_on_top": False,
152 "original": self.event,
153 "obj_id": self.event.pk,
154 "title": _("Send push notification"),
155 "adminform": helpers.AdminForm(
156 context["form"],
157 ((None, {"fields": context["form"].fields.keys()}),),
158 {},
159 ),
160 }
161 )
162 return context
163
164 def form_valid(self, form):
165 values = form.cleaned_data
166 if not values["url"]:
167 values["url"] = settings.BASE_URL + self.event.get_absolute_url()
168 message = Message(
169 title=values["title"],
170 body=values["body"],
171 url=values["url"],
172 category=Category.objects.get(key=Category.EVENT),
173 )
174 message.save()
175 message.users.set([r.member for r in self.event.participants if r.member])
176 message.send()
177
178 messages.success(self.request, _("Message sent successfully."))
179 if "_save" in self.request.POST:
180 return redirect("admin:events_event_details", self.event.pk)
181 return super().form_valid(form)
182
183 def dispatch(self, request, *args, **kwargs):
184 self.event = get_object_or_404(Event, pk=self.kwargs["pk"])
185 return super().dispatch(request, *args, **kwargs)
186
187
188 @method_decorator(staff_member_required, name="dispatch")
189 @method_decorator(organiser_only, name="dispatch")
190 class EventRegistrationsExport(View, PermissionRequiredMixin):
191 """View to export registrations."""
192
193 template_name = "events/admin/details.html"
194 permission_required = "events.change_event"
195
196 def get(self, request, pk):
197 """Export the registration of a specified event.
198
199 :param request: the request object
200 :param pk: the primary key of the event
201 :return: A CSV containing all registrations for the event
202 """
203 event = get_object_or_404(Event, pk=pk)
204 extra_fields = event.registrationinformationfield_set.all()
205 registrations = event.eventregistration_set.all()
206
207 header_fields = (
208 [
209 _("Name"),
210 _("Email"),
211 _("Paid"),
212 _("Present"),
213 _("Status"),
214 _("Phone number"),
215 ]
216 + [field.name for field in extra_fields]
217 + [_("Date"), _("Date cancelled")]
218 )
219
220 rows = []
221 if event.price == 0:
222 header_fields.remove(_("Paid"))
223 for registration in registrations:
224 if registration.member:
225 name = registration.member.get_full_name()
226 else:
227 name = registration.name
228 status = pgettext_lazy("registration status", "registered").capitalize()
229 cancelled = None
230 if registration.date_cancelled:
231
232 if registration.is_late_cancellation():
233 status = pgettext_lazy(
234 "registration status", "late cancellation"
235 ).capitalize()
236 else:
237 status = pgettext_lazy(
238 "registration status", "cancelled"
239 ).capitalize()
240 cancelled = timezone.localtime(registration.date_cancelled)
241
242 elif registration.queue_position:
243 status = pgettext_lazy("registration status", "waiting")
244 data = {
245 _("Name"): name,
246 _("Date"): timezone.localtime(registration.date),
247 _("Present"): _("Yes") if registration.present else "",
248 _("Phone number"): (
249 registration.phone_number if registration.phone_number else ""
250 ),
251 _("Email"): (registration.email if registration.email else ""),
252 _("Status"): status,
253 _("Date cancelled"): cancelled,
254 }
255 if event.price > 0:
256 if registration.is_paid():
257 data[_("Paid")] = registration.payment.get_type_display()
258 else:
259 data[_("Paid")] = _("No")
260
261 data.update(
262 {
263 field["field"].name: field["value"]
264 for field in registration.information_fields
265 }
266 )
267 rows.append(data)
268
269 response = HttpResponse(content_type="text/csv")
270 writer = csv.DictWriter(response, header_fields)
271 writer.writeheader()
272
273 rows = sorted(
274 rows,
275 key=lambda row: (
276 row[_("Status")]
277 == pgettext_lazy(
278 "registration status", "late cancellation"
279 ).capitalize(),
280 row[_("Date")],
281 ),
282 reverse=True,
283 )
284
285 for row in rows:
286 writer.writerow(row)
287
288 response[
289 "Content-Disposition"
290 ] = f'attachment; filename="{slugify(event.title)}.csv"'
291 return response
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/events/admin/views.py b/website/events/admin/views.py
--- a/website/events/admin/views.py
+++ b/website/events/admin/views.py
@@ -97,7 +97,9 @@
values = form.field_values()
try:
services.update_registration(
- registration=self.registration, field_values=values
+ registration=self.registration,
+ field_values=values,
+ actor=self.request.user,
)
messages.success(self.request, _("Registration successfully saved."))
if "_save" in self.request.POST:
|
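The patch above only threads `actor=self.request.user` through to the service call; the guard that actually rejects the save lives in `services.update_registration`, which is not shown in this problem. The sketch below is purely hypothetical and self-contained (the exception class and the fallback-to-member rule are assumptions, not concrexit's real service code), but it illustrates why an explicit actor plausibly fixes the admin-only failure: a manually added non-member registration has no member, so a service that falls back to the registration's member has nobody to authorize the change.

```python
# Hypothetical sketch only -- not the actual concrexit service layer.
class RegistrationError(Exception):
    pass


def update_registration(registration, field_values, actor=None):
    # Assumed rule: authorize against the explicit actor, else the registration's member.
    acting_user = actor if actor is not None else registration.get("member")
    if acting_user is None:
        # A manually added non-member registration has no member, so the
        # pre-patch call style (no actor) has nobody to authorize the change.
        raise RegistrationError("registration has no member and no actor was given")
    registration.setdefault("fields", {}).update(field_values)


guest = {"member": None, "name": "External guest"}  # non-member registration

try:
    update_registration(guest, {"diet": "vegetarian"})  # pre-patch call style
except RegistrationError as exc:
    print("without actor:", exc)

update_registration(guest, {"diet": "vegetarian"}, actor="staff-admin")
print("with actor:", guest["fields"])
```

Under that assumption, the same `form_valid` path works for member and non-member registrations once the acting admin is passed through as the actor.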
{"golden_diff": "diff --git a/website/events/admin/views.py b/website/events/admin/views.py\n--- a/website/events/admin/views.py\n+++ b/website/events/admin/views.py\n@@ -97,7 +97,9 @@\n values = form.field_values()\n try:\n services.update_registration(\n- registration=self.registration, field_values=values\n+ registration=self.registration,\n+ field_values=values,\n+ actor=self.request.user,\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n", "issue": "Can't set registration information fields from admin (for non-member registrations)\n### Describe the bug\r\nIt is not possible to manually set the event registration information fields from the admin. This is quite problematic when adding non-member's registrations\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. add a manual event registration\r\n2. try to edit the information fields (this works)\r\n3. you cannot save it\r\n\r\n### Expected behaviour\r\nIt should save\r\n\r\n\n", "before_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nfrom events import services\nfrom events.decorators import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import FieldsForm, EventMessageForm\nfrom payments.models import Payment\nfrom pushnotifications.models import Message, Category\nfrom events.models import Event, EventRegistration\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_eventregistration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": 
\"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration, field_values=values\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = \"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title=values[\"title\"],\n body=values[\"body\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, 
name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n registration.phone_number if registration.phone_number else \"\"\n ),\n _(\"Email\"): (registration.email if registration.email else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\n \"Content-Disposition\"\n ] = f'attachment; filename=\"{slugify(event.title)}.csv\"'\n return response\n", "path": "website/events/admin/views.py"}], "after_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nfrom events import services\nfrom events.decorators 
import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import FieldsForm, EventMessageForm\nfrom payments.models import Payment\nfrom pushnotifications.models import Message, Category\nfrom events.models import Event, EventRegistration\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_eventregistration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration,\n field_values=values,\n actor=self.request.user,\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = 
\"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title=values[\"title\"],\n body=values[\"body\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n 
registration.phone_number if registration.phone_number else \"\"\n ),\n _(\"Email\"): (registration.email if registration.email else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\n \"Content-Disposition\"\n ] = f'attachment; filename=\"{slugify(event.title)}.csv\"'\n return response\n", "path": "website/events/admin/views.py"}]}
| 3,102 | 118 |
gh_patches_debug_21519
|
rasdani/github-patches
|
git_diff
|
google__flax-596
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
QoL: better print for FrozenDict
The best way I'm aware of to get an overview of model shape is via `jax.tree_map(jnp.shape, params)`. FrozenDicts have no concept of pretty printing the way dicts do, so large models are unwieldy to parse at a glance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/core/frozen_dict.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Frozen Dictionary."""
16
17 from typing import Any, TypeVar, Mapping, Dict, Tuple
18
19 from flax import serialization
20 import jax
21
22
23 K = TypeVar('K')
24 V = TypeVar('V')
25
26
27 @jax.tree_util.register_pytree_node_class
28 class FrozenDict(Mapping[K, V]):
29 """An immutable variant of the Python dict."""
30 __slots__ = ('_dict', '_hash')
31
32 def __init__(self, *args, **kwargs):
33 # make sure the dict is as
34 xs = dict(*args, **kwargs)
35 self._dict = _prepare_freeze(xs)
36
37 self._hash = None
38
39 def __getitem__(self, key):
40 v = self._dict[key]
41 if isinstance(v, dict):
42 return FrozenDict(v)
43 return v
44
45 def __setitem__(self, key, value):
46 raise ValueError('FrozenDict is immutable.')
47
48 def __contains__(self, key):
49 return key in self._dict
50
51 def __iter__(self):
52 return iter(self._dict)
53
54 def __len__(self):
55 return len(self._dict)
56
57 def __repr__(self):
58 return 'FrozenDict(%r)' % self._dict
59
60 def __hash__(self):
61 if self._hash is None:
62 h = 0
63 for key, value in self.items():
64 h ^= hash((key, value))
65 self._hash = h
66 return self._hash
67
68 def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':
69 """Create a new FrozenDict with additional or replaced entries."""
70 return type(self)(self, **unfreeze(add_or_replace))
71
72 def items(self):
73 for key in self._dict:
74 yield (key, self[key])
75
76 def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
77 """Create a new FrozenDict where one entry is removed.
78
79 Example::
80
81 state, params = variables.pop('params')
82
83 Args:
84 key: the key to remove from the dict
85 Returns:
86 A pair with the new FrozenDict and the removed value.
87 """
88 value = self[key]
89 new_dict = dict(self._dict)
90 new_dict.pop(key)
91 new_self = type(self)(new_dict)
92 return new_self, value
93
94 def unfreeze(self) -> Dict[K, V]:
95 return unfreeze(self)
96
97 def tree_flatten(self):
98 return (self._dict,), ()
99
100 @classmethod
101 def tree_unflatten(cls, _, data):
102 return cls(*data)
103
104
105 def _prepare_freeze(xs: Any) -> Any:
106 """Deep copy unfrozen dicts to make the dictionary FrozenDict safe."""
107 if isinstance(xs, FrozenDict):
108 # we can safely ref share the internal state of a FrozenDict
109 # because it is immutable.
110 return xs._dict # pylint: disable=protected-access
111 if not isinstance(xs, dict):
112 # return a leaf as is.
113 return xs
114 # recursively copy dictionary to avoid ref sharing
115 return {key: _prepare_freeze(val) for key, val in xs.items()}
116
117
118 def freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:
119 """Freeze a nested dict.
120
121 Makes a nested `dict` immutable by transforming it into `FrozenDict`.
122 """
123 return FrozenDict(xs)
124
125
126 def unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:
127 """Unfreeze a FrozenDict.
128
129 Makes a mutable copy of a `FrozenDict` mutable by transforming
130 it into (nested) dict.
131 """
132 if not isinstance(x, (FrozenDict, dict)):
133 return x
134 ys = {}
135 for key, value in x.items():
136 ys[key] = unfreeze(value)
137 return ys
138
139
140 def _frozen_dict_state_dict(xs):
141 return {key: serialization.to_state_dict(value) for key, value in xs.items()}
142
143
144 def _restore_frozen_dict(xs, states):
145 return FrozenDict(
146 {key: serialization.from_state_dict(value, states[key])
147 for key, value in xs.items()})
148
149
150 serialization.register_serialization_state(
151 FrozenDict,
152 _frozen_dict_state_dict,
153 _restore_frozen_dict)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flax/core/frozen_dict.py b/flax/core/frozen_dict.py
--- a/flax/core/frozen_dict.py
+++ b/flax/core/frozen_dict.py
@@ -24,6 +24,14 @@
V = TypeVar('V')
+def _indent(x, num_spaces):
+ indent_str = ' ' * num_spaces
+ lines = x.split('\n')
+ assert lines[-1] == ''
+ # skip the final line because it's empty and should not be indented.
+ return '\n'.join(indent_str + line for line in lines[:-1]) + '\n'
+
+
@jax.tree_util.register_pytree_node_class
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
@@ -55,7 +63,21 @@
return len(self._dict)
def __repr__(self):
- return 'FrozenDict(%r)' % self._dict
+ return self.pretty_repr()
+
+ def pretty_repr(self, num_spaces=4):
+ """Returns an indented representation of the nested dictionary."""
+ def pretty_dict(x):
+ if not isinstance(x, dict):
+ return repr(x)
+ rep = ''
+ for key, val in x.items():
+ rep += f'{key}: {pretty_dict(val)},\n'
+ if rep:
+ return '{\n' + _indent(rep, num_spaces) + '}'
+ else:
+ return '{}'
+ return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
|
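The `pretty_repr` added in the patch above is plain string formatting, so its effect can be previewed without installing flax by copying the two helpers and feeding them the kind of shape tree the issue mentions (the result of `jax.tree_map(jnp.shape, params)`). The module and parameter names below are made up for illustration.

```python
# Stand-alone preview of the pretty printing introduced by the patch above.
def _indent(x, num_spaces):
    indent_str = ' ' * num_spaces
    lines = x.split('\n')
    assert lines[-1] == ''
    # skip the final line because it's empty and should not be indented.
    return '\n'.join(indent_str + line for line in lines[:-1]) + '\n'


def pretty_dict(x, num_spaces=4):
    if not isinstance(x, dict):
        return repr(x)
    rep = ''
    for key, val in x.items():
        rep += f'{key}: {pretty_dict(val)},\n'
    if rep:
        return '{\n' + _indent(rep, num_spaces) + '}'
    return '{}'


# A made-up shape tree, shaped like the output of jax.tree_map(jnp.shape, params).
shapes = {'params': {'Dense_0': {'kernel': (3, 64), 'bias': (64,)}}}
print(f'FrozenDict({pretty_dict(shapes)})')
# FrozenDict({
#     params: {
#         Dense_0: {
#             kernel: (3, 64),
#             bias: (64,),
#         },
#     },
# })
```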
{"golden_diff": "diff --git a/flax/core/frozen_dict.py b/flax/core/frozen_dict.py\n--- a/flax/core/frozen_dict.py\n+++ b/flax/core/frozen_dict.py\n@@ -24,6 +24,14 @@\n V = TypeVar('V')\n \n \n+def _indent(x, num_spaces):\n+ indent_str = ' ' * num_spaces\n+ lines = x.split('\\n')\n+ assert lines[-1] == ''\n+ # skip the final line because it's empty and should not be indented.\n+ return '\\n'.join(indent_str + line for line in lines[:-1]) + '\\n'\n+\n+\n @jax.tree_util.register_pytree_node_class\n class FrozenDict(Mapping[K, V]):\n \"\"\"An immutable variant of the Python dict.\"\"\"\n@@ -55,7 +63,21 @@\n return len(self._dict)\n \n def __repr__(self):\n- return 'FrozenDict(%r)' % self._dict\n+ return self.pretty_repr()\n+\n+ def pretty_repr(self, num_spaces=4):\n+ \"\"\"Returns an indented representation of the nested dictionary.\"\"\"\n+ def pretty_dict(x):\n+ if not isinstance(x, dict):\n+ return repr(x)\n+ rep = ''\n+ for key, val in x.items():\n+ rep += f'{key}: {pretty_dict(val)},\\n'\n+ if rep:\n+ return '{\\n' + _indent(rep, num_spaces) + '}'\n+ else:\n+ return '{}'\n+ return f'FrozenDict({pretty_dict(self._dict)})'\n \n def __hash__(self):\n if self._hash is None:\n", "issue": "QoL: better print for FrozenDict\nThe best way I'm aware of to get an overview of model shape is via `jax.tree_map(jnp.shape, params)`. FrozenDicts have no concept of pretty printing the way dicts do, so large models are unwieldy to parse at a glance. \n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Frozen Dictionary.\"\"\"\n\nfrom typing import Any, TypeVar, Mapping, Dict, Tuple\n\nfrom flax import serialization\nimport jax\n\n\nK = TypeVar('K')\nV = TypeVar('V')\n\n\[email protected]_util.register_pytree_node_class\nclass FrozenDict(Mapping[K, V]):\n \"\"\"An immutable variant of the Python dict.\"\"\"\n __slots__ = ('_dict', '_hash')\n\n def __init__(self, *args, **kwargs):\n # make sure the dict is as\n xs = dict(*args, **kwargs)\n self._dict = _prepare_freeze(xs)\n\n self._hash = None\n\n def __getitem__(self, key):\n v = self._dict[key]\n if isinstance(v, dict):\n return FrozenDict(v)\n return v\n\n def __setitem__(self, key, value):\n raise ValueError('FrozenDict is immutable.')\n\n def __contains__(self, key):\n return key in self._dict\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)\n\n def __repr__(self):\n return 'FrozenDict(%r)' % self._dict\n\n def __hash__(self):\n if self._hash is None:\n h = 0\n for key, value in self.items():\n h ^= hash((key, value))\n self._hash = h\n return self._hash\n\n def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':\n \"\"\"Create a new FrozenDict with additional or replaced entries.\"\"\"\n return type(self)(self, **unfreeze(add_or_replace))\n\n def items(self):\n for key in self._dict:\n yield (key, self[key])\n\n def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n \"\"\"Create a new FrozenDict where one entry is 
removed.\n\n Example::\n\n state, params = variables.pop('params')\n\n Args:\n key: the key to remove from the dict\n Returns:\n A pair with the new FrozenDict and the removed value.\n \"\"\"\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value\n\n def unfreeze(self) -> Dict[K, V]:\n return unfreeze(self)\n\n def tree_flatten(self):\n return (self._dict,), ()\n\n @classmethod\n def tree_unflatten(cls, _, data):\n return cls(*data)\n\n\ndef _prepare_freeze(xs: Any) -> Any:\n \"\"\"Deep copy unfrozen dicts to make the dictionary FrozenDict safe.\"\"\"\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}\n\n\ndef freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:\n \"\"\"Freeze a nested dict.\n\n Makes a nested `dict` immutable by transforming it into `FrozenDict`.\n \"\"\"\n return FrozenDict(xs)\n\n\ndef unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:\n \"\"\"Unfreeze a FrozenDict.\n\n Makes a mutable copy of a `FrozenDict` mutable by transforming\n it into (nested) dict.\n \"\"\"\n if not isinstance(x, (FrozenDict, dict)):\n return x\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n\n\ndef _frozen_dict_state_dict(xs):\n return {key: serialization.to_state_dict(value) for key, value in xs.items()}\n\n\ndef _restore_frozen_dict(xs, states):\n return FrozenDict(\n {key: serialization.from_state_dict(value, states[key])\n for key, value in xs.items()})\n\n\nserialization.register_serialization_state(\n FrozenDict,\n _frozen_dict_state_dict,\n _restore_frozen_dict)\n", "path": "flax/core/frozen_dict.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Frozen Dictionary.\"\"\"\n\nfrom typing import Any, TypeVar, Mapping, Dict, Tuple\n\nfrom flax import serialization\nimport jax\n\n\nK = TypeVar('K')\nV = TypeVar('V')\n\n\ndef _indent(x, num_spaces):\n indent_str = ' ' * num_spaces\n lines = x.split('\\n')\n assert lines[-1] == ''\n # skip the final line because it's empty and should not be indented.\n return '\\n'.join(indent_str + line for line in lines[:-1]) + '\\n'\n\n\[email protected]_util.register_pytree_node_class\nclass FrozenDict(Mapping[K, V]):\n \"\"\"An immutable variant of the Python dict.\"\"\"\n __slots__ = ('_dict', '_hash')\n\n def __init__(self, *args, **kwargs):\n # make sure the dict is as\n xs = dict(*args, **kwargs)\n self._dict = _prepare_freeze(xs)\n\n self._hash = None\n\n def __getitem__(self, key):\n v = self._dict[key]\n if isinstance(v, dict):\n return FrozenDict(v)\n return v\n\n def __setitem__(self, key, value):\n raise ValueError('FrozenDict is immutable.')\n\n def __contains__(self, 
key):\n return key in self._dict\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)\n\n def __repr__(self):\n return self.pretty_repr()\n\n def pretty_repr(self, num_spaces=4):\n \"\"\"Returns an indented representation of the nested dictionary.\"\"\"\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n return f'FrozenDict({pretty_dict(self._dict)})'\n\n def __hash__(self):\n if self._hash is None:\n h = 0\n for key, value in self.items():\n h ^= hash((key, value))\n self._hash = h\n return self._hash\n\n def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':\n \"\"\"Create a new FrozenDict with additional or replaced entries.\"\"\"\n return type(self)(self, **unfreeze(add_or_replace))\n\n def items(self):\n for key in self._dict:\n yield (key, self[key])\n\n def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n \"\"\"Create a new FrozenDict where one entry is removed.\n\n Example::\n\n state, params = variables.pop('params')\n\n Args:\n key: the key to remove from the dict\n Returns:\n A pair with the new FrozenDict and the removed value.\n \"\"\"\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value\n\n def unfreeze(self) -> Dict[K, V]:\n return unfreeze(self)\n\n def tree_flatten(self):\n return (self._dict,), ()\n\n @classmethod\n def tree_unflatten(cls, _, data):\n return cls(*data)\n\n\ndef _prepare_freeze(xs: Any) -> Any:\n \"\"\"Deep copy unfrozen dicts to make the dictionary FrozenDict safe.\"\"\"\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}\n\n\ndef freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:\n \"\"\"Freeze a nested dict.\n\n Makes a nested `dict` immutable by transforming it into `FrozenDict`.\n \"\"\"\n return FrozenDict(xs)\n\n\ndef unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:\n \"\"\"Unfreeze a FrozenDict.\n\n Makes a mutable copy of a `FrozenDict` mutable by transforming\n it into (nested) dict.\n \"\"\"\n if not isinstance(x, (FrozenDict, dict)):\n return x\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n\n\ndef _frozen_dict_state_dict(xs):\n return {key: serialization.to_state_dict(value) for key, value in xs.items()}\n\n\ndef _restore_frozen_dict(xs, states):\n return FrozenDict(\n {key: serialization.from_state_dict(value, states[key])\n for key, value in xs.items()})\n\n\nserialization.register_serialization_state(\n FrozenDict,\n _frozen_dict_state_dict,\n _restore_frozen_dict)\n", "path": "flax/core/frozen_dict.py"}]}
| 1,746 | 361 |
gh_patches_debug_14338
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-2603
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Path parsing breaks URLs with `//` in the query string.
## Expected behavior and actual behavior.
I'm opening a file over HTTP and the URL includes a querystring with a double forward slash `//`.
I can't share the full URL since it includes a token, but I've traced the call to the call to `pathlib.Path(path).as_posix()` at https://github.com/rasterio/rasterio/blob/5cf71dc806adc299108543def00647845ab4fc42/rasterio/_path.py#L79. The root issue is that `pathlib.Path` seems to normalize `//` to `/`.
```python
In [18]: pathlib.Path("foo.tif?bar=a//b").as_posix()
Out[18]: 'foo.tif?bar=a/b'
```
I don't actually know what to do here... I'm not sure if that call to `pathlib.Path` can be avoided (maybe only if the scheme is http?).
The relatively easy workaround is to prepend the `fp` argument to `rasterio.open` with `/vsicurl/`, but that might be hard for libraries to figure out (without reimplementing parts of `_parse_path`).
## Steps to reproduce the problem.
I've redacted the secret token so this doesn't actually run.
```python
In [1]: import rasterio
In [2]: rasterio.open('https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/10/T/ET/2016/02/08/S2A_MSIL2A_20160208T190542_N0300_R013_T10TET_20210528T041016.SAFE/GRANULE/L2A_T10TET_A003299_20160208T190605/IMG_DATA/R10m/T10TET_20160208T190542_B03_10m.tif?<token>&sig=QalzEsDqH2wtl%2BTouR7lVh
...: MmzYr4aIKDg%2BAP2//vywA%3D')
---------------------------------------------------------------------------
CPLE_HttpResponseError Traceback (most recent call last)
File rasterio/_base.pyx:302, in rasterio._base.DatasetBase.__init__()
File rasterio/_base.pyx:213, in rasterio._base.open_dataset()
File rasterio/_err.pyx:217, in rasterio._err.exc_wrap_pointer()
CPLE_HttpResponseError: HTTP response code: 403
During handling of the above exception, another exception occurred:
RasterioIOError Traceback (most recent call last)
Cell In [2], line 1
----> 1 rasterio.open('https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/10/T/ET/2016/02/08/S2A_MSIL2A_20160208T190542_N0300_R013_T10TET_20210528T041016.SAFE/GRANULE/L2A_T10TET_A003299_20160208T190605/IMG_DATA/R10m/T10TET_20160208T190542_B03_10m.tif?<token>&sig=QalzEsDqH2wtl%2BTouR7lVhMmzYr4aIKDg%2BAP2//vywA%3D')
File ~/mambaforge/envs/gdal/lib/python3.10/site-packages/rasterio/env.py:442, in ensure_env_with_credentials.<locals>.wrapper(*args, **kwds)
439 session = DummySession()
441 with env_ctor(session=session):
--> 442 return f(*args, **kwds)
File ~/mambaforge/envs/gdal/lib/python3.10/site-packages/rasterio/__init__.py:277, in open(fp, mode, driver, width, height, count, crs, transform, dtype, nodata, sharing, **kwargs)
274 path = _parse_path(raw_dataset_path)
276 if mode == "r":
--> 277 dataset = DatasetReader(path, driver=driver, sharing=sharing, **kwargs)
278 elif mode == "r+":
279 dataset = get_writer_for_path(path, driver=driver)(
280 path, mode, driver=driver, sharing=sharing, **kwargs
281 )
File rasterio/_base.pyx:304, in rasterio._base.DatasetBase.__init__()
RasterioIOError: HTTP response code: 403
```
#### Environment Information
```
❯ rio --show-versions
rasterio info:
rasterio: 1.3.2
GDAL: 3.5.2
PROJ: 9.0.1
GEOS: 3.11.0
PROJ DATA: /home/taugspurger/mambaforge/envs/gdal/share/proj
GDAL DATA: None
System:
python: 3.10.6 | packaged by conda-forge | (main, Aug 22 2022, 20:36:39) [GCC 10.4.0]
executable: /home/taugspurger/mambaforge/envs/gdal/bin/python
machine: Linux-5.15.57.1-microsoft-standard-WSL2-x86_64-with-glibc2.31
Python deps:
affine: 2.3.1
attrs: 22.1.0
certifi: 2022.09.24
click: 8.1.3
cligj: 0.7.2
cython: None
numpy: 1.23.3
snuggs: 1.4.7
click-plugins: None
setuptools: 65.4.1
```
## Installation Method
conda
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/_path.py`
Content:
```
1 """Dataset paths, identifiers, and filenames
2
3 Note: this module is not part of Rasterio's API. It is for internal use
4 only.
5
6 """
7
8 import pathlib
9 import re
10 import sys
11 from urllib.parse import urlparse
12
13 import attr
14
15 from rasterio.errors import PathError
16
17 # Supported URI schemes and their mapping to GDAL's VSI suffix.
18 # TODO: extend for other cloud plaforms.
19 SCHEMES = {
20 'ftp': 'curl',
21 'gzip': 'gzip',
22 'http': 'curl',
23 'https': 'curl',
24 's3': 's3',
25 'tar': 'tar',
26 'zip': 'zip',
27 'file': 'file',
28 'oss': 'oss',
29 'gs': 'gs',
30 'az': 'az',
31 }
32
33 CURLSCHEMES = set([k for k, v in SCHEMES.items() if v == 'curl'])
34
35 # TODO: extend for other cloud plaforms.
36 REMOTESCHEMES = set([k for k, v in SCHEMES.items() if v in ('curl', 's3', 'oss', 'gs', 'az',)])
37
38
39 class _Path:
40 """Base class for dataset paths"""
41
42 def as_vsi(self):
43 return _vsi_path(self)
44
45
46 @attr.s(slots=True)
47 class _ParsedPath(_Path):
48 """Result of parsing a dataset URI/Path
49
50 Attributes
51 ----------
52 path : str
53 Parsed path. Includes the hostname and query string in the case
54 of a URI.
55 archive : str
56 Parsed archive path.
57 scheme : str
58 URI scheme such as "https" or "zip+s3".
59 """
60 path = attr.ib()
61 archive = attr.ib()
62 scheme = attr.ib()
63
64 @classmethod
65 def from_uri(cls, uri):
66 parts = urlparse(uri)
67 path = parts.path
68 scheme = parts.scheme or None
69
70 if parts.query:
71 path += "?" + parts.query
72
73 if parts.scheme and parts.netloc:
74 path = parts.netloc + path
75
76 parts = path.split('!')
77 path = parts.pop() if parts else None
78 archive = parts.pop() if parts else None
79 return _ParsedPath(pathlib.Path(path).as_posix(), archive, scheme)
80
81 @property
82 def name(self):
83 """The parsed path's original URI"""
84 if not self.scheme:
85 return self.path
86 elif self.archive:
87 return "{}://{}!{}".format(self.scheme, self.archive, self.path)
88 else:
89 return "{}://{}".format(self.scheme, self.path)
90
91 @property
92 def is_remote(self):
93 """Test if the path is a remote, network URI"""
94 return bool(self.scheme) and self.scheme.split("+")[-1] in REMOTESCHEMES
95
96 @property
97 def is_local(self):
98 """Test if the path is a local URI"""
99 return not self.scheme or (self.scheme and self.scheme.split('+')[-1] not in REMOTESCHEMES)
100
101
102 @attr.s(slots=True)
103 class _UnparsedPath(_Path):
104 """Encapsulates legacy GDAL filenames
105
106 Attributes
107 ----------
108 path : str
109 The legacy GDAL filename.
110 """
111 path = attr.ib()
112
113 @property
114 def name(self):
115 """The unparsed path's original path"""
116 return self.path
117
118
119 def _parse_path(path):
120 """Parse a dataset's identifier or path into its parts
121
122 Parameters
123 ----------
124 path : str or path-like object
125 The path to be parsed.
126
127 Returns
128 -------
129 ParsedPath or UnparsedPath
130
131 Notes
132 -----
133 When legacy GDAL filenames are encountered, they will be returned
134 in a UnparsedPath.
135
136 """
137 if isinstance(path, _Path):
138 return path
139
140 elif pathlib and isinstance(path, pathlib.PurePath):
141 return _ParsedPath(path.as_posix(), None, None)
142
143 elif isinstance(path, str):
144
145 if sys.platform == "win32" and re.match(r"^[a-zA-Z]\:", path):
146 if pathlib:
147 return _ParsedPath(pathlib.Path(path).as_posix(), None, None)
148 else:
149 return _UnparsedPath(path)
150
151 elif path.startswith('/vsi'):
152 return _UnparsedPath(path)
153
154 else:
155 parts = urlparse(path)
156
157 else:
158 raise PathError("invalid path '{!r}'".format(path))
159
160 # if the scheme is not one of Rasterio's supported schemes, we
161 # return an UnparsedPath.
162 if parts.scheme:
163
164 if all(p in SCHEMES for p in parts.scheme.split('+')):
165 return _ParsedPath.from_uri(path)
166
167 return _UnparsedPath(path)
168
169
170 def _vsi_path(path):
171 """Convert a parsed path to a GDAL VSI path
172
173 Parameters
174 ----------
175 path : Path
176 A ParsedPath or UnparsedPath object.
177
178 Returns
179 -------
180 str
181
182 """
183 if isinstance(path, _UnparsedPath):
184 return path.path
185
186 elif isinstance(path, _ParsedPath):
187
188 if not path.scheme:
189 return path.path
190
191 else:
192 if path.scheme.split('+')[-1] in CURLSCHEMES:
193 suffix = '{}://'.format(path.scheme.split('+')[-1])
194 else:
195 suffix = ''
196
197 prefix = '/'.join('vsi{0}'.format(SCHEMES[p]) for p in path.scheme.split('+') if p != 'file')
198
199 if prefix:
200 if path.archive:
201 result = '/{}/{}{}/{}'.format(prefix, suffix, path.archive, path.path.lstrip('/'))
202 else:
203 result = '/{}/{}{}'.format(prefix, suffix, path.path)
204 else:
205 result = path.path
206 return result
207
208 else:
209 raise ValueError("path must be a ParsedPath or UnparsedPath object")
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/_path.py b/rasterio/_path.py
--- a/rasterio/_path.py
+++ b/rasterio/_path.py
@@ -64,7 +64,7 @@
@classmethod
def from_uri(cls, uri):
parts = urlparse(uri)
- path = parts.path
+ path = pathlib.Path(parts.path).as_posix() if parts.path else parts.path
scheme = parts.scheme or None
if parts.query:
@@ -76,7 +76,7 @@
parts = path.split('!')
path = parts.pop() if parts else None
archive = parts.pop() if parts else None
- return _ParsedPath(pathlib.Path(path).as_posix(), archive, scheme)
+ return _ParsedPath(path, archive, scheme)
@property
def name(self):
|
{"golden_diff": "diff --git a/rasterio/_path.py b/rasterio/_path.py\n--- a/rasterio/_path.py\n+++ b/rasterio/_path.py\n@@ -64,7 +64,7 @@\n @classmethod\n def from_uri(cls, uri):\n parts = urlparse(uri)\n- path = parts.path\n+ path = pathlib.Path(parts.path).as_posix() if parts.path else parts.path\n scheme = parts.scheme or None\n \n if parts.query:\n@@ -76,7 +76,7 @@\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n- return _ParsedPath(pathlib.Path(path).as_posix(), archive, scheme)\n+ return _ParsedPath(path, archive, scheme)\n \n @property\n def name(self):\n", "issue": "Path parsing breaks URLs with `//` in the query string.\n## Expected behavior and actual behavior.\r\n\r\nI'm opening a file over HTTP and the URL includes a querystring with a double forward slash `//`.\r\n\r\nI can't share the full URL since it includes a token, but I've traced the call to the call to `pathlib.Path(path).as_posix()` at https://github.com/rasterio/rasterio/blob/5cf71dc806adc299108543def00647845ab4fc42/rasterio/_path.py#L79. The root issue is that `pathlib.Path` seems to normalize `//` to `/`.\r\n\r\n```python\r\nIn [18]: pathlib.Path(\"foo.tif?bar=a//b\").as_posix()\r\nOut[18]: 'foo.tif?bar=a/b'\r\n```\r\n\r\nI don't actually know what to do here... I'm not sure if that call to `pathlib.Path` can be avoided (maybe only if the scheme is http?).\r\n\r\nThe relatively easy workaround is to prepend the `fp` argument to `rasterio.open` with `/vsicurl/`, but that might be hard for libraries to figure out (without reimplementing parts of `_parse_path`).\r\n\r\n## Steps to reproduce the problem.\r\n\r\nI've redacted the secret token so this doesn't actually run.\r\n\r\n```python\r\nIn [1]: import rasterio\r\n\r\nIn [2]: rasterio.open('https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/10/T/ET/2016/02/08/S2A_MSIL2A_20160208T190542_N0300_R013_T10TET_20210528T041016.SAFE/GRANULE/L2A_T10TET_A003299_20160208T190605/IMG_DATA/R10m/T10TET_20160208T190542_B03_10m.tif?<token>&sig=QalzEsDqH2wtl%2BTouR7lVh\r\n ...: MmzYr4aIKDg%2BAP2//vywA%3D')\r\n---------------------------------------------------------------------------\r\nCPLE_HttpResponseError Traceback (most recent call last)\r\nFile rasterio/_base.pyx:302, in rasterio._base.DatasetBase.__init__()\r\n\r\nFile rasterio/_base.pyx:213, in rasterio._base.open_dataset()\r\n\r\nFile rasterio/_err.pyx:217, in rasterio._err.exc_wrap_pointer()\r\n\r\nCPLE_HttpResponseError: HTTP response code: 403\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nRasterioIOError Traceback (most recent call last)\r\nCell In [2], line 1\r\n----> 1 rasterio.open('https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/10/T/ET/2016/02/08/S2A_MSIL2A_20160208T190542_N0300_R013_T10TET_20210528T041016.SAFE/GRANULE/L2A_T10TET_A003299_20160208T190605/IMG_DATA/R10m/T10TET_20160208T190542_B03_10m.tif?<token>&sig=QalzEsDqH2wtl%2BTouR7lVhMmzYr4aIKDg%2BAP2//vywA%3D')\r\n\r\nFile ~/mambaforge/envs/gdal/lib/python3.10/site-packages/rasterio/env.py:442, in ensure_env_with_credentials.<locals>.wrapper(*args, **kwds)\r\n 439 session = DummySession()\r\n 441 with env_ctor(session=session):\r\n--> 442 return f(*args, **kwds)\r\n\r\nFile ~/mambaforge/envs/gdal/lib/python3.10/site-packages/rasterio/__init__.py:277, in open(fp, mode, driver, width, height, count, crs, transform, dtype, nodata, sharing, **kwargs)\r\n 274 path = _parse_path(raw_dataset_path)\r\n 276 if mode == \"r\":\r\n--> 277 dataset = 
DatasetReader(path, driver=driver, sharing=sharing, **kwargs)\r\n 278 elif mode == \"r+\":\r\n 279 dataset = get_writer_for_path(path, driver=driver)(\r\n 280 path, mode, driver=driver, sharing=sharing, **kwargs\r\n 281 )\r\n\r\nFile rasterio/_base.pyx:304, in rasterio._base.DatasetBase.__init__()\r\n\r\nRasterioIOError: HTTP response code: 403\r\n\r\n```\r\n\r\n#### Environment Information\r\n\r\n```\r\n\u276f rio --show-versions\r\nrasterio info:\r\n rasterio: 1.3.2\r\n GDAL: 3.5.2\r\n PROJ: 9.0.1\r\n GEOS: 3.11.0\r\n PROJ DATA: /home/taugspurger/mambaforge/envs/gdal/share/proj\r\n GDAL DATA: None\r\n\r\nSystem:\r\n python: 3.10.6 | packaged by conda-forge | (main, Aug 22 2022, 20:36:39) [GCC 10.4.0]\r\nexecutable: /home/taugspurger/mambaforge/envs/gdal/bin/python\r\n machine: Linux-5.15.57.1-microsoft-standard-WSL2-x86_64-with-glibc2.31\r\n\r\nPython deps:\r\n affine: 2.3.1\r\n attrs: 22.1.0\r\n certifi: 2022.09.24\r\n click: 8.1.3\r\n cligj: 0.7.2\r\n cython: None\r\n numpy: 1.23.3\r\n snuggs: 1.4.7\r\nclick-plugins: None\r\nsetuptools: 65.4.1\r\n```\r\n\r\n## Installation Method\r\n\r\nconda\n", "before_files": [{"content": "\"\"\"Dataset paths, identifiers, and filenames\n\nNote: this module is not part of Rasterio's API. It is for internal use\nonly.\n\n\"\"\"\n\nimport pathlib\nimport re\nimport sys\nfrom urllib.parse import urlparse\n\nimport attr\n\nfrom rasterio.errors import PathError\n\n# Supported URI schemes and their mapping to GDAL's VSI suffix.\n# TODO: extend for other cloud plaforms.\nSCHEMES = {\n 'ftp': 'curl',\n 'gzip': 'gzip',\n 'http': 'curl',\n 'https': 'curl',\n 's3': 's3',\n 'tar': 'tar',\n 'zip': 'zip',\n 'file': 'file',\n 'oss': 'oss',\n 'gs': 'gs',\n 'az': 'az',\n}\n\nCURLSCHEMES = set([k for k, v in SCHEMES.items() if v == 'curl'])\n\n# TODO: extend for other cloud plaforms.\nREMOTESCHEMES = set([k for k, v in SCHEMES.items() if v in ('curl', 's3', 'oss', 'gs', 'az',)])\n\n\nclass _Path:\n \"\"\"Base class for dataset paths\"\"\"\n\n def as_vsi(self):\n return _vsi_path(self)\n\n\[email protected](slots=True)\nclass _ParsedPath(_Path):\n \"\"\"Result of parsing a dataset URI/Path\n\n Attributes\n ----------\n path : str\n Parsed path. 
Includes the hostname and query string in the case\n of a URI.\n archive : str\n Parsed archive path.\n scheme : str\n URI scheme such as \"https\" or \"zip+s3\".\n \"\"\"\n path = attr.ib()\n archive = attr.ib()\n scheme = attr.ib()\n\n @classmethod\n def from_uri(cls, uri):\n parts = urlparse(uri)\n path = parts.path\n scheme = parts.scheme or None\n\n if parts.query:\n path += \"?\" + parts.query\n\n if parts.scheme and parts.netloc:\n path = parts.netloc + path\n\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n return _ParsedPath(pathlib.Path(path).as_posix(), archive, scheme)\n\n @property\n def name(self):\n \"\"\"The parsed path's original URI\"\"\"\n if not self.scheme:\n return self.path\n elif self.archive:\n return \"{}://{}!{}\".format(self.scheme, self.archive, self.path)\n else:\n return \"{}://{}\".format(self.scheme, self.path)\n\n @property\n def is_remote(self):\n \"\"\"Test if the path is a remote, network URI\"\"\"\n return bool(self.scheme) and self.scheme.split(\"+\")[-1] in REMOTESCHEMES\n\n @property\n def is_local(self):\n \"\"\"Test if the path is a local URI\"\"\"\n return not self.scheme or (self.scheme and self.scheme.split('+')[-1] not in REMOTESCHEMES)\n\n\[email protected](slots=True)\nclass _UnparsedPath(_Path):\n \"\"\"Encapsulates legacy GDAL filenames\n\n Attributes\n ----------\n path : str\n The legacy GDAL filename.\n \"\"\"\n path = attr.ib()\n\n @property\n def name(self):\n \"\"\"The unparsed path's original path\"\"\"\n return self.path\n\n\ndef _parse_path(path):\n \"\"\"Parse a dataset's identifier or path into its parts\n\n Parameters\n ----------\n path : str or path-like object\n The path to be parsed.\n\n Returns\n -------\n ParsedPath or UnparsedPath\n\n Notes\n -----\n When legacy GDAL filenames are encountered, they will be returned\n in a UnparsedPath.\n\n \"\"\"\n if isinstance(path, _Path):\n return path\n\n elif pathlib and isinstance(path, pathlib.PurePath):\n return _ParsedPath(path.as_posix(), None, None)\n\n elif isinstance(path, str):\n\n if sys.platform == \"win32\" and re.match(r\"^[a-zA-Z]\\:\", path):\n if pathlib:\n return _ParsedPath(pathlib.Path(path).as_posix(), None, None)\n else:\n return _UnparsedPath(path)\n\n elif path.startswith('/vsi'):\n return _UnparsedPath(path)\n\n else:\n parts = urlparse(path)\n\n else:\n raise PathError(\"invalid path '{!r}'\".format(path))\n\n # if the scheme is not one of Rasterio's supported schemes, we\n # return an UnparsedPath.\n if parts.scheme:\n\n if all(p in SCHEMES for p in parts.scheme.split('+')):\n return _ParsedPath.from_uri(path)\n\n return _UnparsedPath(path)\n\n\ndef _vsi_path(path):\n \"\"\"Convert a parsed path to a GDAL VSI path\n\n Parameters\n ----------\n path : Path\n A ParsedPath or UnparsedPath object.\n\n Returns\n -------\n str\n\n \"\"\"\n if isinstance(path, _UnparsedPath):\n return path.path\n\n elif isinstance(path, _ParsedPath):\n\n if not path.scheme:\n return path.path\n\n else:\n if path.scheme.split('+')[-1] in CURLSCHEMES:\n suffix = '{}://'.format(path.scheme.split('+')[-1])\n else:\n suffix = ''\n\n prefix = '/'.join('vsi{0}'.format(SCHEMES[p]) for p in path.scheme.split('+') if p != 'file')\n\n if prefix:\n if path.archive:\n result = '/{}/{}{}/{}'.format(prefix, suffix, path.archive, path.path.lstrip('/'))\n else:\n result = '/{}/{}{}'.format(prefix, suffix, path.path)\n else:\n result = path.path\n return result\n\n else:\n raise ValueError(\"path must be a ParsedPath or UnparsedPath 
object\")\n", "path": "rasterio/_path.py"}], "after_files": [{"content": "\"\"\"Dataset paths, identifiers, and filenames\n\nNote: this module is not part of Rasterio's API. It is for internal use\nonly.\n\n\"\"\"\n\nimport pathlib\nimport re\nimport sys\nfrom urllib.parse import urlparse\n\nimport attr\n\nfrom rasterio.errors import PathError\n\n# Supported URI schemes and their mapping to GDAL's VSI suffix.\n# TODO: extend for other cloud plaforms.\nSCHEMES = {\n 'ftp': 'curl',\n 'gzip': 'gzip',\n 'http': 'curl',\n 'https': 'curl',\n 's3': 's3',\n 'tar': 'tar',\n 'zip': 'zip',\n 'file': 'file',\n 'oss': 'oss',\n 'gs': 'gs',\n 'az': 'az',\n}\n\nCURLSCHEMES = set([k for k, v in SCHEMES.items() if v == 'curl'])\n\n# TODO: extend for other cloud plaforms.\nREMOTESCHEMES = set([k for k, v in SCHEMES.items() if v in ('curl', 's3', 'oss', 'gs', 'az',)])\n\n\nclass _Path:\n \"\"\"Base class for dataset paths\"\"\"\n\n def as_vsi(self):\n return _vsi_path(self)\n\n\[email protected](slots=True)\nclass _ParsedPath(_Path):\n \"\"\"Result of parsing a dataset URI/Path\n\n Attributes\n ----------\n path : str\n Parsed path. Includes the hostname and query string in the case\n of a URI.\n archive : str\n Parsed archive path.\n scheme : str\n URI scheme such as \"https\" or \"zip+s3\".\n \"\"\"\n path = attr.ib()\n archive = attr.ib()\n scheme = attr.ib()\n\n @classmethod\n def from_uri(cls, uri):\n parts = urlparse(uri)\n path = pathlib.Path(parts.path).as_posix() if parts.path else parts.path\n scheme = parts.scheme or None\n\n if parts.query:\n path += \"?\" + parts.query\n\n if parts.scheme and parts.netloc:\n path = parts.netloc + path\n\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n return _ParsedPath(path, archive, scheme)\n\n @property\n def name(self):\n \"\"\"The parsed path's original URI\"\"\"\n if not self.scheme:\n return self.path\n elif self.archive:\n return \"{}://{}!{}\".format(self.scheme, self.archive, self.path)\n else:\n return \"{}://{}\".format(self.scheme, self.path)\n\n @property\n def is_remote(self):\n \"\"\"Test if the path is a remote, network URI\"\"\"\n return bool(self.scheme) and self.scheme.split(\"+\")[-1] in REMOTESCHEMES\n\n @property\n def is_local(self):\n \"\"\"Test if the path is a local URI\"\"\"\n return not self.scheme or (self.scheme and self.scheme.split('+')[-1] not in REMOTESCHEMES)\n\n\[email protected](slots=True)\nclass _UnparsedPath(_Path):\n \"\"\"Encapsulates legacy GDAL filenames\n\n Attributes\n ----------\n path : str\n The legacy GDAL filename.\n \"\"\"\n path = attr.ib()\n\n @property\n def name(self):\n \"\"\"The unparsed path's original path\"\"\"\n return self.path\n\n\ndef _parse_path(path):\n \"\"\"Parse a dataset's identifier or path into its parts\n\n Parameters\n ----------\n path : str or path-like object\n The path to be parsed.\n\n Returns\n -------\n ParsedPath or UnparsedPath\n\n Notes\n -----\n When legacy GDAL filenames are encountered, they will be returned\n in a UnparsedPath.\n\n \"\"\"\n if isinstance(path, _Path):\n return path\n\n elif pathlib and isinstance(path, pathlib.PurePath):\n return _ParsedPath(path.as_posix(), None, None)\n\n elif isinstance(path, str):\n\n if sys.platform == \"win32\" and re.match(r\"^[a-zA-Z]\\:\", path):\n if pathlib:\n return _ParsedPath(pathlib.Path(path).as_posix(), None, None)\n else:\n return _UnparsedPath(path)\n\n elif path.startswith('/vsi'):\n return _UnparsedPath(path)\n\n else:\n parts = urlparse(path)\n\n else:\n 
raise PathError(\"invalid path '{!r}'\".format(path))\n\n # if the scheme is not one of Rasterio's supported schemes, we\n # return an UnparsedPath.\n if parts.scheme:\n\n if all(p in SCHEMES for p in parts.scheme.split('+')):\n return _ParsedPath.from_uri(path)\n\n return _UnparsedPath(path)\n\n\ndef _vsi_path(path):\n \"\"\"Convert a parsed path to a GDAL VSI path\n\n Parameters\n ----------\n path : Path\n A ParsedPath or UnparsedPath object.\n\n Returns\n -------\n str\n\n \"\"\"\n if isinstance(path, _UnparsedPath):\n return path.path\n\n elif isinstance(path, _ParsedPath):\n\n if not path.scheme:\n return path.path\n\n else:\n if path.scheme.split('+')[-1] in CURLSCHEMES:\n suffix = '{}://'.format(path.scheme.split('+')[-1])\n else:\n suffix = ''\n\n prefix = '/'.join('vsi{0}'.format(SCHEMES[p]) for p in path.scheme.split('+') if p != 'file')\n\n if prefix:\n if path.archive:\n result = '/{}/{}{}/{}'.format(prefix, suffix, path.archive, path.path.lstrip('/'))\n else:\n result = '/{}/{}{}'.format(prefix, suffix, path.path)\n else:\n result = path.path\n return result\n\n else:\n raise ValueError(\"path must be a ParsedPath or UnparsedPath object\")\n", "path": "rasterio/_path.py"}]}
| 3,553 | 188 |
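The rasterio record above hinges on `pathlib` collapsing consecutive slashes, which silently corrupts a `//` inside a URL's query string when the whole path-plus-query is round-tripped through `Path(...).as_posix()`. A minimal standalone illustration of that behaviour, and of the parse-then-normalize-only-the-path approach the patch takes (the URL below is a made-up placeholder, not one from the issue):

```python
# Demonstrates why normalizing the full path+query with pathlib is lossy,
# and why normalizing only the path component preserves the query string.
import pathlib
from urllib.parse import urlparse

raw = "https://example.com/data/tile.tif?sig=abc//def"  # hypothetical URL
parts = urlparse(raw)

# Lossy: pathlib collapses "//" -> "/", mangling the query string.
lossy = pathlib.PurePosixPath(parts.path + "?" + parts.query).as_posix()
print(lossy)  # /data/tile.tif?sig=abc/def

# Safe: normalize only the path, then re-append the query untouched.
safe = pathlib.PurePosixPath(parts.path).as_posix() + "?" + parts.query
print(safe)   # /data/tile.tif?sig=abc//def
```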
gh_patches_debug_11172
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2464
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enhance parser "satellite_installer_configurations"
By default, the file "/etc/foreman-installer/custom-hiera.yaml" is empty; this means customers haven't done any tuning, so the file shouldn't be skipped.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/satellite_installer_configurations.py`
Content:
```
1 """
2 Satellite installer configuration files
3 =======================================
4
5 Parsers included in this module are:
6
7 CustomHiera - file ``/etc/foreman-installer/custom-hiera.yaml``
8 ---------------------------------------------------------------
9 Parsers the file `/etc/foreman-installer/custom-hiera.yaml`
10
11 """
12
13 from insights import parser, YAMLParser
14 from insights.specs import Specs
15
16
17 @parser(Specs.satellite_custom_hiera)
18 class CustomHiera(YAMLParser):
19 """
20 Class to parse ``/etc/foreman-installer/custom-hiera.yaml``
21
22 Examples:
23 >>> 'apache::mod::prefork::serverlimit' in custom_hiera
24 True
25 >>> custom_hiera['apache::mod::prefork::serverlimit']
26 582
27 """
28
29 pass
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/parsers/satellite_installer_configurations.py b/insights/parsers/satellite_installer_configurations.py
--- a/insights/parsers/satellite_installer_configurations.py
+++ b/insights/parsers/satellite_installer_configurations.py
@@ -12,6 +12,7 @@
from insights import parser, YAMLParser
from insights.specs import Specs
+from insights.parsers import SkipException
@parser(Specs.satellite_custom_hiera)
@@ -25,5 +26,8 @@
>>> custom_hiera['apache::mod::prefork::serverlimit']
582
"""
-
- pass
+ def parse_content(self, content):
+ try:
+ super(CustomHiera, self).parse_content(content)
+ except SkipException:
+ pass
|
{"golden_diff": "diff --git a/insights/parsers/satellite_installer_configurations.py b/insights/parsers/satellite_installer_configurations.py\n--- a/insights/parsers/satellite_installer_configurations.py\n+++ b/insights/parsers/satellite_installer_configurations.py\n@@ -12,6 +12,7 @@\n \n from insights import parser, YAMLParser\n from insights.specs import Specs\n+from insights.parsers import SkipException\n \n \n @parser(Specs.satellite_custom_hiera)\n@@ -25,5 +26,8 @@\n >>> custom_hiera['apache::mod::prefork::serverlimit']\n 582\n \"\"\"\n-\n- pass\n+ def parse_content(self, content):\n+ try:\n+ super(CustomHiera, self).parse_content(content)\n+ except SkipException:\n+ pass\n", "issue": "Enhance parser \"satellite_installer_configurations\"\nAs a default, the file \"/etc/foreman-installer/custom-hiera.yaml\" is empty, it means customers haven't done any tuning, it shouldn't be skipped.\n", "before_files": [{"content": "\"\"\"\nSatellite installer configuration files\n=======================================\n\nParsers included in this module are:\n\nCustomHiera - file ``/etc/foreman-installer/custom-hiera.yaml``\n---------------------------------------------------------------\nParsers the file `/etc/foreman-installer/custom-hiera.yaml`\n\n\"\"\"\n\nfrom insights import parser, YAMLParser\nfrom insights.specs import Specs\n\n\n@parser(Specs.satellite_custom_hiera)\nclass CustomHiera(YAMLParser):\n \"\"\"\n Class to parse ``/etc/foreman-installer/custom-hiera.yaml``\n\n Examples:\n >>> 'apache::mod::prefork::serverlimit' in custom_hiera\n True\n >>> custom_hiera['apache::mod::prefork::serverlimit']\n 582\n \"\"\"\n\n pass\n", "path": "insights/parsers/satellite_installer_configurations.py"}], "after_files": [{"content": "\"\"\"\nSatellite installer configuration files\n=======================================\n\nParsers included in this module are:\n\nCustomHiera - file ``/etc/foreman-installer/custom-hiera.yaml``\n---------------------------------------------------------------\nParsers the file `/etc/foreman-installer/custom-hiera.yaml`\n\n\"\"\"\n\nfrom insights import parser, YAMLParser\nfrom insights.specs import Specs\nfrom insights.parsers import SkipException\n\n\n@parser(Specs.satellite_custom_hiera)\nclass CustomHiera(YAMLParser):\n \"\"\"\n Class to parse ``/etc/foreman-installer/custom-hiera.yaml``\n\n Examples:\n >>> 'apache::mod::prefork::serverlimit' in custom_hiera\n True\n >>> custom_hiera['apache::mod::prefork::serverlimit']\n 582\n \"\"\"\n def parse_content(self, content):\n try:\n super(CustomHiera, self).parse_content(content)\n except SkipException:\n pass\n", "path": "insights/parsers/satellite_installer_configurations.py"}]}
| 529 | 190 |
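For the insights-core record above, the patched parser has to tolerate an empty `custom-hiera.yaml`. A likely reason the base parser balks at it is that YAML treats empty input as `None` rather than as a document; a tiny PyYAML illustration of that behaviour (not insights-core code):

```python
# Empty YAML input parses to None, not to an error or an empty dict,
# which is why an empty custom-hiera.yaml should not be treated as a failure.
import yaml

print(yaml.safe_load(""))      # None  -> file exists but nothing is tuned
print(yaml.safe_load("a: 1"))  # {'a': 1}
```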
gh_patches_debug_8175
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-2645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forcing img_pil causes an error when trying to load PNGs
When I force the PIL provider using `export KIVY_IMAGE=img_pil`, Kivy exits with the error log below.
I have installed the Pillow library from PyPI.
```
[INFO ] [Logger ] Record log in /mnt/data/logs/kivy_69-12-31_5.txt
[INFO ] Kivy v1.9.0-dev
[INFO ] [Python ] v2.7.8 (default, Sep 24 2014, 05:50:30)
[GCC 4.8.2 20131219 (prerelease)]
[INFO ] [Factory ] 172 symbols loaded
[INFO ] [Image ] Providers: img_pil (img_tex, img_dds, img_sdl2, img_pygame, img_ffpyplayer, img_gif ignored)
[INFO ] [Window ] Provider: egl_rpi
[INFO ] [GL ] OpenGL version <OpenGL ES 2.0>
[INFO ] [GL ] OpenGL vendor <Broadcom>
[INFO ] [GL ] OpenGL renderer <VideoCore IV HW>
[INFO ] [GL ] OpenGL parsed version: 2, 0
[INFO ] [GL ] Shading version <OpenGL ES GLSL ES 1.00>
[INFO ] [GL ] Texture max size <2048>
[INFO ] [GL ] Texture max units <8>
[INFO ] [Shader ] fragment shader: <Compiled>
[INFO ] [Shader ] vertex shader: <Compiled>
[CRITICAL] [Window ] Unable to find any valuable Window provider at all!
egl_rpi - AttributeError: dispose
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/__init__.py", line 65, in core_select_lib
cls = cls()
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/window/__init__.py", line 594, in __init__
self.create_window()
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/window/window_egl_rpi.py", line 26, in create_window
super(WindowEglRpi, self).create_window()
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/window/__init__.py", line 731, in create_window
self.render_context = RenderContext()
File "kivy/graphics/instructions.pyx", line 751, in kivy.graphics.instructions.RenderContext.__init__ (kivy/graphics/instructions.c:10225)
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py", line 461, in __init__
self.filename = arg
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py", line 650, in _set_filename
mipmap=self._mipmap, nocache=self._nocache)
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py", line 388, in load
im = loader(filename, **kwargs)
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py", line 164, in __init__
self._data = self.load(filename)
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/img_pil.py", line 96, in load
return list(self._img_read(im))
File "/opt/vendor/lib/python2.7/site-packages/kivy/core/image/img_pil.py", line 75, in _img_read
if not im.dispose and img_ol:
File "/opt/vendor/lib/python2.7/site-packages/PIL/Image.py", line 608, in __getattr__
raise AttributeError(name)
[INFO ] [Text ] Provider: pil(['text_sdl2', 'text_pygame', 'text_sdlttf'] ignored)
[CRITICAL] [App ] Unable to get a Window, abort.
Exception SystemExit: 1 in 'kivy.properties.dpi2px' ignored
[INFO ] [Audio ] Providers: (audio_pygst, audio_ffpyplayer, audio_sdl, audio_pygame ignored)
[CRITICAL] [App ] Unable to get a Window, abort.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/image/img_pil.py`
Content:
```
1 '''
2 PIL: PIL image loader
3 '''
4
5 __all__ = ('ImageLoaderPIL', )
6
7 try:
8 from PIL import Image as PILImage
9 except:
10 import Image as PILImage
11
12 from kivy.logger import Logger
13 from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
14
15
16 class ImageLoaderPIL(ImageLoaderBase):
17 '''Image loader based on the PIL library.
18
19 .. versionadded:: 1.0.8
20
21 Support for GIF animation added.
22
23 Gif animation has a lot of issues(transparency/color depths... etc).
24 In order to keep it simple, what is implimented here is what is
25 natively supported by the PIL library.
26
27 As a general rule, try to use gifs that have no transparency.
28 Gif's with transparency will work but be prepared for some
29 artifacts until transparency support is improved.
30
31 '''
32
33 @staticmethod
34 def can_save():
35 return True
36
37 @staticmethod
38 def extensions():
39 '''Return accepted extensions for this loader'''
40 # See http://www.pythonware.com/library/pil/handbook/index.htm
41 return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',
42 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',
43 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',
44 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',
45 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',
46 'xv')
47
48 def _img_correct(self, _img_tmp):
49 '''Convert image to the correct format and orientation.
50 '''
51 # image loader work only with rgb/rgba image
52 if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
53 try:
54 imc = _img_tmp.convert('RGBA')
55 except:
56 Logger.warning(
57 'Image: Unable to convert image to rgba (was %s)' %
58 (_img_tmp.mode.lower()))
59 raise
60 _img_tmp = imc
61
62 return _img_tmp
63
64 def _img_read(self, im):
65 '''Read images from an animated file.
66 '''
67 im.seek(0)
68
69 # Read all images inside
70 try:
71 img_ol = None
72 while True:
73 img_tmp = im
74 img_tmp = self._img_correct(img_tmp)
75 if not im.dispose and img_ol:
76 # paste new frame over old so as to handle
77 # transparency properly
78 img_ol.paste(img_tmp, (0, 0), img_tmp)
79 img_tmp = img_ol
80 img_ol = img_tmp
81 yield ImageData(img_tmp.size[0], img_tmp.size[1],
82 img_tmp.mode.lower(), img_tmp.tostring())
83 im.seek(im.tell() + 1)
84 except EOFError:
85 pass
86
87 def load(self, filename):
88 try:
89 im = PILImage.open(filename)
90 except:
91 Logger.warning('Image: Unable to load image <%s>' % filename)
92 raise
93 # update internals
94 self.filename = filename
95 # returns an array of type ImageData len 1 if not a sequence image
96 return list(self._img_read(im))
97
98 @staticmethod
99 def save(filename, width, height, fmt, pixels):
100 image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
101 image.save(filename)
102 return True
103
104
105 # register
106 ImageLoader.register(ImageLoaderPIL)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py
--- a/kivy/core/image/img_pil.py
+++ b/kivy/core/image/img_pil.py
@@ -72,7 +72,7 @@
while True:
img_tmp = im
img_tmp = self._img_correct(img_tmp)
- if not im.dispose and img_ol:
+ if img_ol and (hasattr(im, 'dispose') and not im.dispose):
# paste new frame over old so as to handle
# transparency properly
img_ol.paste(img_tmp, (0, 0), img_tmp)
|
{"golden_diff": "diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py\n--- a/kivy/core/image/img_pil.py\n+++ b/kivy/core/image/img_pil.py\n@@ -72,7 +72,7 @@\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n- if not im.dispose and img_ol:\n+ if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n", "issue": "Forcing img_pil causes an error when trying to load PNGs\nWhen I force PIL provider using `export KIVY_IMAGE=img_pil` Kivy exits with this error log:\nI have installed Pillow library from PyPI.\n\n```\n[INFO ] [Logger ] Record log in /mnt/data/logs/kivy_69-12-31_5.txt\n[INFO ] Kivy v1.9.0-dev\n[INFO ] [Python ] v2.7.8 (default, Sep 24 2014, 05:50:30) \n[GCC 4.8.2 20131219 (prerelease)]\n[INFO ] [Factory ] 172 symbols loaded\n[INFO ] [Image ] Providers: img_pil (img_tex, img_dds, img_sdl2, img_pygame, img_ffpyplayer, img_gif ignored)\n[INFO ] [Window ] Provider: egl_rpi\n[INFO ] [GL ] OpenGL version <OpenGL ES 2.0>\n[INFO ] [GL ] OpenGL vendor <Broadcom>\n[INFO ] [GL ] OpenGL renderer <VideoCore IV HW>\n[INFO ] [GL ] OpenGL parsed version: 2, 0\n[INFO ] [GL ] Shading version <OpenGL ES GLSL ES 1.00>\n[INFO ] [GL ] Texture max size <2048>\n[INFO ] [GL ] Texture max units <8>\n[INFO ] [Shader ] fragment shader: <Compiled>\n[INFO ] [Shader ] vertex shader: <Compiled>\n[CRITICAL] [Window ] Unable to find any valuable Window provider at all!\negl_rpi - AttributeError: dispose\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/__init__.py\", line 65, in core_select_lib\n cls = cls()\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/window/__init__.py\", line 594, in __init__\n self.create_window()\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/window/window_egl_rpi.py\", line 26, in create_window\n super(WindowEglRpi, self).create_window()\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/window/__init__.py\", line 731, in create_window\n self.render_context = RenderContext()\n File \"kivy/graphics/instructions.pyx\", line 751, in kivy.graphics.instructions.RenderContext.__init__ (kivy/graphics/instructions.c:10225)\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py\", line 461, in __init__\n self.filename = arg\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py\", line 650, in _set_filename\n mipmap=self._mipmap, nocache=self._nocache)\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py\", line 388, in load\n im = loader(filename, **kwargs)\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/__init__.py\", line 164, in __init__\n self._data = self.load(filename)\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/img_pil.py\", line 96, in load\n return list(self._img_read(im))\n File \"/opt/vendor/lib/python2.7/site-packages/kivy/core/image/img_pil.py\", line 75, in _img_read\n if not im.dispose and img_ol:\n File \"/opt/vendor/lib/python2.7/site-packages/PIL/Image.py\", line 608, in __getattr__\n raise AttributeError(name)\n\n[INFO ] [Text ] Provider: pil(['text_sdl2', 'text_pygame', 'text_sdlttf'] ignored)\n[CRITICAL] [App ] Unable to get a Window, abort.\n Exception SystemExit: 1 in 'kivy.properties.dpi2px' ignored\n[INFO ] [Audio ] Providers: (audio_pygst, audio_ffpyplayer, audio_sdl, audio_pygame ignored)\n[CRITICAL] [App ] Unable to get a Window, abort.\n```\n\n", "before_files": [{"content": 
"'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... etc).\n In order to keep it simple, what is implimented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if not im.dispose and img_ol:\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tostring())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels):\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n", "path": "kivy/core/image/img_pil.py"}], "after_files": [{"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... 
etc).\n In order to keep it simple, what is implimented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tostring())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels):\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n", "path": "kivy/core/image/img_pil.py"}]}
| 2,266 | 143 |
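The kivy record above fails because the GIF-specific `dispose` attribute is looked up on every image, and PIL raises `AttributeError` for formats that do not carry it (as the PNG traceback in the issue shows). A small sketch of the guarded-access pattern the patch adopts (the file name is a hypothetical placeholder):

```python
# Non-animated formats such as PNG have no GIF "dispose" info,
# so the attribute must be probed rather than read unconditionally.
from PIL import Image

im = Image.open("example.png")            # hypothetical local file
print(hasattr(im, "dispose"))             # False for non-GIF images
print(getattr(im, "dispose", None))       # None instead of AttributeError
```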
gh_patches_debug_14651
|
rasdani/github-patches
|
git_diff
|
pfnet__pytorch-pfn-extras-763
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding pytest in an MPI environment
Some functions that run in a distributed environment have not been fully verified, so some pytest tests will be run under MPI to verify them.
https://github.com/pfnet/pytorch-pfn-extras/blob/266e8bde2c2a1a6aa3f8648d49e758975c8b436a/tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_distributed_snapshot.py#L40-L46
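A sketch of how such a test can be gated so it only runs when launched under MPI (for example via `mpiexec -n 2 python -m pytest ...`); the helper below is an illustration, not code from the repository:

```python
# Hypothetical MPI-gated pytest example: skipped unless at least two ranks
# are available, so it is exercised only when launched through mpiexec.
import pytest

try:
    from mpi4py import MPI
    _comm = MPI.COMM_WORLD
    _world_size = _comm.Get_size()
except ImportError:
    _comm = None
    _world_size = 1

needs_mpi = pytest.mark.skipif(_world_size < 2, reason="requires mpiexec with >= 2 ranks")


@needs_mpi
def test_runs_on_every_rank(tmp_path):
    rank = _comm.Get_rank()
    (tmp_path / f"rank_{rank}.txt").write_text(str(rank))
    _comm.Barrier()
    # Each process has its own tmp_path, so each rank checks its own file.
    assert (tmp_path / f"rank_{rank}.txt").read_text() == str(rank)
```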
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2
3 import setuptools
4
5 here = os.path.abspath(os.path.dirname(__file__))
6 # Get __version__ variable
7 exec(open(os.path.join(here, "pytorch_pfn_extras", "_version.py")).read())
8
9
10 setuptools.setup(
11 name="pytorch-pfn-extras",
12 version=__version__, # NOQA
13 description="Supplementary components to accelerate research and "
14 "development in PyTorch.",
15 author="Preferred Networks, Inc.",
16 license="MIT License",
17 install_requires=["numpy", "packaging", "torch", "typing-extensions>=3.10"],
18 extras_require={
19 "test": [
20 "pytest",
21 "onnxruntime",
22 "torchvision",
23 "torchaudio",
24 "pysen",
25 "black==23.3.0",
26 "flake8==4.0.1",
27 "isort==5.10.1",
28 "mypy==1.3.0",
29 "types-PyYAML",
30 "types-setuptools",
31 "matplotlib",
32 "tensorboard",
33 "ipython",
34 "ipywidgets",
35 "pandas",
36 "optuna",
37 "onnx",
38 "pytorch-ignite",
39 ],
40 "onnx": ["onnx"],
41 },
42 python_requires=">=3.6.0",
43 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
44 package_data={"pytorch_pfn_extras": ["py.typed"]},
45 )
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,27 +16,6 @@
license="MIT License",
install_requires=["numpy", "packaging", "torch", "typing-extensions>=3.10"],
extras_require={
- "test": [
- "pytest",
- "onnxruntime",
- "torchvision",
- "torchaudio",
- "pysen",
- "black==23.3.0",
- "flake8==4.0.1",
- "isort==5.10.1",
- "mypy==1.3.0",
- "types-PyYAML",
- "types-setuptools",
- "matplotlib",
- "tensorboard",
- "ipython",
- "ipywidgets",
- "pandas",
- "optuna",
- "onnx",
- "pytorch-ignite",
- ],
"onnx": ["onnx"],
},
python_requires=">=3.6.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,27 +16,6 @@\n license=\"MIT License\",\n install_requires=[\"numpy\", \"packaging\", \"torch\", \"typing-extensions>=3.10\"],\n extras_require={\n- \"test\": [\n- \"pytest\",\n- \"onnxruntime\",\n- \"torchvision\",\n- \"torchaudio\",\n- \"pysen\",\n- \"black==23.3.0\",\n- \"flake8==4.0.1\",\n- \"isort==5.10.1\",\n- \"mypy==1.3.0\",\n- \"types-PyYAML\",\n- \"types-setuptools\",\n- \"matplotlib\",\n- \"tensorboard\",\n- \"ipython\",\n- \"ipywidgets\",\n- \"pandas\",\n- \"optuna\",\n- \"onnx\",\n- \"pytorch-ignite\",\n- ],\n \"onnx\": [\"onnx\"],\n },\n python_requires=\">=3.6.0\",\n", "issue": "Adding pytest in an MPI environment\nThere are some functions that run in a distributed environment that have not been fully verified, so some pytests will be run in MPI to verify them.\r\n\r\nhttps://github.com/pfnet/pytorch-pfn-extras/blob/266e8bde2c2a1a6aa3f8648d49e758975c8b436a/tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_distributed_snapshot.py#L40-L46\r\n\n", "before_files": [{"content": "import os\n\nimport setuptools\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, \"pytorch_pfn_extras\", \"_version.py\")).read())\n\n\nsetuptools.setup(\n name=\"pytorch-pfn-extras\",\n version=__version__, # NOQA\n description=\"Supplementary components to accelerate research and \"\n \"development in PyTorch.\",\n author=\"Preferred Networks, Inc.\",\n license=\"MIT License\",\n install_requires=[\"numpy\", \"packaging\", \"torch\", \"typing-extensions>=3.10\"],\n extras_require={\n \"test\": [\n \"pytest\",\n \"onnxruntime\",\n \"torchvision\",\n \"torchaudio\",\n \"pysen\",\n \"black==23.3.0\",\n \"flake8==4.0.1\",\n \"isort==5.10.1\",\n \"mypy==1.3.0\",\n \"types-PyYAML\",\n \"types-setuptools\",\n \"matplotlib\",\n \"tensorboard\",\n \"ipython\",\n \"ipywidgets\",\n \"pandas\",\n \"optuna\",\n \"onnx\",\n \"pytorch-ignite\",\n ],\n \"onnx\": [\"onnx\"],\n },\n python_requires=\">=3.6.0\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"pytorch_pfn_extras\": [\"py.typed\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nimport setuptools\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, \"pytorch_pfn_extras\", \"_version.py\")).read())\n\n\nsetuptools.setup(\n name=\"pytorch-pfn-extras\",\n version=__version__, # NOQA\n description=\"Supplementary components to accelerate research and \"\n \"development in PyTorch.\",\n author=\"Preferred Networks, Inc.\",\n license=\"MIT License\",\n install_requires=[\"numpy\", \"packaging\", \"torch\", \"typing-extensions>=3.10\"],\n extras_require={\n \"onnx\": [\"onnx\"],\n },\n python_requires=\">=3.6.0\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"pytorch_pfn_extras\": [\"py.typed\"]},\n)\n", "path": "setup.py"}]}
| 778 | 243 |
gh_patches_debug_21560
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-3394
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MLFlow Logger slows training steps dramatically, despite only setting metrics to be logged on epoch
## 🐛 Bug
When using the MLFlow logger with a remote server, per-step logging introduces latency that slows the training loop.
I have tried to configure metrics to be logged only per epoch; however, it seems this still results in much slower performance. I suspect the logger is still communicating with the MLFlow server on each training step.
### To Reproduce
1. Start an MLFlow server locally
```
mlflow ui
```
2. Run the minimal code example below as is, (with MLFlow logger set to use the default file uri.)
3. Uncomment the `tracking_uri` to use the local MLFlow server and run the code again. You will see a 2-3 times drop in iterations per second.
#### Code sample
```
import torch
from torch.utils.data import TensorDataset, DataLoader
import pytorch_lightning as pl
class MyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.num_examples = 5000
        self.num_valid = 1000
        self.batch_size = 64
        self.lr = 1e-3
        self.wd = 1e-2
        self.num_features = 2
        self.linear = torch.nn.Linear(self.num_features, 1)
        self.loss_func = torch.nn.MSELoss()
        self.X = torch.rand(self.num_examples, self.num_features)
        self.y = self.X.matmul(torch.rand(self.num_features, 1)) + torch.rand(self.num_examples)

    def forward(self, x):
        return self.linear(x)

    def train_dataloader(self):
        ds = TensorDataset(self.X[:-self.num_valid], self.X[:-self.num_valid])
        dl = DataLoader(ds, batch_size=self.batch_size)
        return dl

    def val_dataloader(self):
        ds = TensorDataset(self.X[-self.num_valid:], self.X[-self.num_valid:])
        dl = DataLoader(ds, batch_size=self.batch_size)
        return dl

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.wd)

    def training_step(self, batch, batch_idx):
        x, y = batch
        yhat = self(x)
        loss = self.loss_func(yhat, y)
        result = pl.TrainResult(minimize=loss)
        result.log('train_loss', loss, on_epoch=True, on_step=False)
        return result

    def validation_step(self, batch, batch_idx):
        x, y = batch
        yhat = self(x)
        loss = self.loss_func(yhat, y)
        result = pl.EvalResult(early_stop_on=loss)
        result.log('val_loss', loss, on_epoch=True, on_step=False)
        return result


if __name__ == '__main__':
    from pytorch_lightning.loggers import TensorBoardLogger, MLFlowLogger
    mlf_logger = MLFlowLogger(
        experiment_name=f"MyModel",
        # tracking_uri="http://localhost:5000"
    )
    trainer = pl.Trainer(
        min_epochs=5,
        max_epochs=50,
        early_stop_callback=True,
        logger=mlf_logger
    )
    model = MyModel()
    trainer.fit(model)
```
### Expected behavior
When using TrainResult and EvalResult, or when handling metric logging manually via the `training_epoch_end` and `validation_epoch_end` callbacks, it should be possible to prevent the MLFlow logger from communicating with the server on every training step.
This would make it feasible to use the MLFlow logger when a remote server is used for experiment tracking.
### Environment
```
* CUDA:
    - GPU:
    - available: False
    - version: None
* Packages:
    - numpy: 1.18.2
    - pyTorch_debug: False
    - pyTorch_version: 1.6.0+cpu
    - pytorch-lightning: 0.9.0
    - tensorboard: 2.2.0
    - tqdm: 4.48.2
* System:
    - OS: Linux
    - architecture:
        - 64bit
        -
    - processor: x86_64
    - python: 3.7.9
    - version: #1 SMP Tue May 26 11:42:35 UTC 2020
```
### Additional context
We host an MLFlow instance in AWS and would like to be able to track experiments without affecting training speed.
It appears that, in general, the MLFlow logger is much less performant than the default TensorBoard logger, but this would not be much of a problem if we could avoid calls to the logger during the training loop.
### Solution
I've done a bit of debugging in the codebase and have been able to isolate the cause to two places:
https://github.com/PyTorchLightning/pytorch-lightning/blob/d438ad8a8db3e76d3ed4e3c6bc9b91d6b3266b8e/pytorch_lightning/loggers/mlflow.py#L125-L129
Here `self.experiment` is called regardless of whether `self._run_id` exists. If we add an `if not self._run_id` here we avoid calling `self._mlflow_client.get_experiment_by_name(self._experiment_name)` on each step.
However, we still call it each time we log metrics to MLflow, because of the `self.experiment` property.
https://github.com/PyTorchLightning/pytorch-lightning/blob/d438ad8a8db3e76d3ed4e3c6bc9b91d6b3266b8e/pytorch_lightning/loggers/mlflow.py#L100-L112
Here, if we store `expt` within the logger and only call `self._mlflow_client.get_experiment_by_name` when it does not already exist, we eliminate all of the overhead: it runs as fast as the TensorBoard logger, and all of the MLflow logging appears to work as expected.
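A minimal sketch of that caching approach, using the attribute names from the snippets above (an illustration of the idea with a hypothetical class name, not the exact upstream change):

```python
# Sketch: resolve the experiment and run lazily, but cache the ids so that
# subsequent metric logging never re-queries the tracking server.
from mlflow.tracking import MlflowClient


class CachedMlflowHandle:  # hypothetical helper name
    def __init__(self, experiment_name, tracking_uri=None, tags=None):
        self._client = MlflowClient(tracking_uri)
        self._experiment_name = experiment_name
        self._experiment_id = None
        self._run_id = None
        self.tags = tags

    @property
    def experiment(self) -> MlflowClient:
        # Only the first access talks to the server; later calls reuse the ids.
        if self._experiment_id is None:
            expt = self._client.get_experiment_by_name(self._experiment_name)
            if expt is not None:
                self._experiment_id = expt.experiment_id
            else:
                self._experiment_id = self._client.create_experiment(name=self._experiment_name)
        if self._run_id is None:
            run = self._client.create_run(experiment_id=self._experiment_id, tags=self.tags)
            self._run_id = run.info.run_id
        return self._client
```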
I'd be happy to raise a PR for this fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/loggers/mlflow.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 MLflow
17 ------
18 """
19 from argparse import Namespace
20 from time import time
21 from typing import Optional, Dict, Any, Union
22
23 try:
24 import mlflow
25 from mlflow.tracking import MlflowClient
26 _MLFLOW_AVAILABLE = True
27 except ModuleNotFoundError: # pragma: no-cover
28 mlflow = None
29 MlflowClient = None
30 _MLFLOW_AVAILABLE = False
31
32
33 from pytorch_lightning import _logger as log
34 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
35 from pytorch_lightning.utilities import rank_zero_only
36
37
38 LOCAL_FILE_URI_PREFIX = "file:"
39
40
41 class MLFlowLogger(LightningLoggerBase):
42 """
43 Log using `MLflow <https://mlflow.org>`_. Install it with pip:
44
45 .. code-block:: bash
46
47 pip install mlflow
48
49 Example:
50 >>> from pytorch_lightning import Trainer
51 >>> from pytorch_lightning.loggers import MLFlowLogger
52 >>> mlf_logger = MLFlowLogger(
53 ... experiment_name="default",
54 ... tracking_uri="file:./ml-runs"
55 ... )
56 >>> trainer = Trainer(logger=mlf_logger)
57
58 Use the logger anywhere in you :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:
59
60 >>> from pytorch_lightning import LightningModule
61 >>> class LitModel(LightningModule):
62 ... def training_step(self, batch, batch_idx):
63 ... # example
64 ... self.logger.experiment.whatever_ml_flow_supports(...)
65 ...
66 ... def any_lightning_module_function_or_hook(self):
67 ... self.logger.experiment.whatever_ml_flow_supports(...)
68
69 Args:
70 experiment_name: The name of the experiment
71 tracking_uri: Address of local or remote tracking server.
72 If not provided, defaults to `file:<save_dir>`.
73 tags: A dictionary tags for the experiment.
74 save_dir: A path to a local directory where the MLflow runs get saved.
75 Defaults to `./mlflow` if `tracking_uri` is not provided.
76 Has no effect if `tracking_uri` is provided.
77
78 """
79
80 def __init__(self,
81 experiment_name: str = 'default',
82 tracking_uri: Optional[str] = None,
83 tags: Optional[Dict[str, Any]] = None,
84 save_dir: Optional[str] = './mlruns'):
85
86 if not _MLFLOW_AVAILABLE:
87 raise ImportError('You want to use `mlflow` logger which is not installed yet,'
88 ' install it with `pip install mlflow`.')
89 super().__init__()
90 if not tracking_uri:
91 tracking_uri = f'{LOCAL_FILE_URI_PREFIX}{save_dir}'
92
93 self._experiment_name = experiment_name
94 self._experiment_id = None
95 self._tracking_uri = tracking_uri
96 self._run_id = None
97 self.tags = tags
98 self._mlflow_client = MlflowClient(tracking_uri)
99
100 @property
101 @rank_zero_experiment
102 def experiment(self) -> MlflowClient:
103 r"""
104 Actual MLflow object. To use MLflow features in your
105 :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
106
107 Example::
108
109 self.logger.experiment.some_mlflow_function()
110
111 """
112 expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)
113
114 if expt:
115 self._experiment_id = expt.experiment_id
116 else:
117 log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')
118 self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)
119
120 if not self._run_id:
121 run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)
122 self._run_id = run.info.run_id
123 return self._mlflow_client
124
125 @property
126 def run_id(self):
127 # create the experiment if it does not exist to get the run id
128 _ = self.experiment
129 return self._run_id
130
131 @property
132 def experiment_id(self):
133 # create the experiment if it does not exist to get the experiment id
134 _ = self.experiment
135 return self._experiment_id
136
137 @rank_zero_only
138 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
139 params = self._convert_params(params)
140 params = self._flatten_dict(params)
141 for k, v in params.items():
142 self.experiment.log_param(self.run_id, k, v)
143
144 @rank_zero_only
145 def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
146 assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
147
148 timestamp_ms = int(time() * 1000)
149 for k, v in metrics.items():
150 if isinstance(v, str):
151 log.warning(f'Discarding metric with string value {k}={v}.')
152 continue
153 self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step)
154
155 @rank_zero_only
156 def finalize(self, status: str = 'FINISHED') -> None:
157 super().finalize(status)
158 status = 'FINISHED' if status == 'success' else status
159 if self.experiment.get_run(self.run_id):
160 self.experiment.set_terminated(self.run_id, status)
161
162 @property
163 def save_dir(self) -> Optional[str]:
164 """
165 The root file directory in which MLflow experiments are saved.
166
167 Return:
168 Local path to the root experiment directory if the tracking uri is local.
169 Otherwhise returns `None`.
170 """
171 if self._tracking_uri.startswith(LOCAL_FILE_URI_PREFIX):
172 return self._tracking_uri.lstrip(LOCAL_FILE_URI_PREFIX)
173
174 @property
175 def name(self) -> str:
176 return self.experiment_id
177
178 @property
179 def version(self) -> str:
180 return self.run_id
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py
--- a/pytorch_lightning/loggers/mlflow.py
+++ b/pytorch_lightning/loggers/mlflow.py
@@ -109,15 +109,15 @@
self.logger.experiment.some_mlflow_function()
"""
- expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)
-
- if expt:
- self._experiment_id = expt.experiment_id
- else:
- log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')
- self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)
-
- if not self._run_id:
+ if self._experiment_id is None:
+ expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)
+ if expt is not None:
+ self._experiment_id = expt.experiment_id
+ else:
+ log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')
+ self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)
+
+ if self._run_id is None:
run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)
self._run_id = run.info.run_id
return self._mlflow_client
|
{"golden_diff": "diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py\n--- a/pytorch_lightning/loggers/mlflow.py\n+++ b/pytorch_lightning/loggers/mlflow.py\n@@ -109,15 +109,15 @@\n self.logger.experiment.some_mlflow_function()\n \n \"\"\"\n- expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)\n-\n- if expt:\n- self._experiment_id = expt.experiment_id\n- else:\n- log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')\n- self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)\n-\n- if not self._run_id:\n+ if self._experiment_id is None:\n+ expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)\n+ if expt is not None:\n+ self._experiment_id = expt.experiment_id\n+ else:\n+ log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')\n+ self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)\n+\n+ if self._run_id is None:\n run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)\n self._run_id = run.info.run_id\n return self._mlflow_client\n", "issue": "MLFlow Logger slows training steps dramatically, despite only setting metrics to be logged on epoch\n## \ud83d\udc1b Bug\r\n\r\nWhen using the MLFlow logger, with a remote server, logging per step introduces latency which slows the training loop.\r\nI have tried to configure logging of metrics only per epoch, however it seems this still results in much slower performance. I suspect the logger is still communicating with the MLFlow server on each training step.\r\n\r\n### To Reproduce\r\n1. Start an MLFlow server locally\r\n```\r\nmlflow ui\r\n```\r\n2. Run the minimal code example below as is, (with MLFlow logger set to use the default file uri.)\r\n3. Uncomment out the `tracking_uri` to use the local MLFlow server and run the code again. 
You will see a 2-3 times drop in the iterations per second.\r\n\r\n#### Code sample\r\n```\r\nimport torch\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nimport pytorch_lightning as pl\r\n\r\nclass MyModel(pl.LightningModule):\r\n def __init__(self):\r\n super().__init__()\r\n self.num_examples = 5000\r\n self.num_valid = 1000\r\n self.batch_size = 64\r\n self.lr = 1e-3\r\n self.wd = 1e-2\r\n self.num_features = 2\r\n self.linear = torch.nn.Linear(self.num_features, 1)\r\n self.loss_func = torch.nn.MSELoss()\r\n self.X = torch.rand(self.num_examples, self.num_features)\r\n self.y = self.X.matmul(torch.rand(self.num_features, 1)) + torch.rand(self.num_examples)\r\n \r\n def forward(self, x):\r\n return self.linear(x)\r\n\r\n def train_dataloader(self): \r\n ds = TensorDataset(self.X[:-self.num_valid], self.X[:-self.num_valid])\r\n dl = DataLoader(ds, batch_size=self.batch_size)\r\n return dl\r\n\r\n def val_dataloader(self): \r\n ds = TensorDataset(self.X[-self.num_valid:], self.X[-self.num_valid:])\r\n dl = DataLoader(ds, batch_size=self.batch_size)\r\n return dl\r\n\r\n def configure_optimizers(self):\r\n return torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.wd)\r\n\r\n def training_step(self, batch, batch_idx):\r\n x, y = batch\r\n yhat = self(x)\r\n loss = self.loss_func(yhat, y)\r\n result = pl.TrainResult(minimize=loss)\r\n result.log('train_loss', loss, on_epoch=True, on_step=False)\r\n return result\r\n\r\n def validation_step(self, batch, batch_idx):\r\n x, y = batch\r\n yhat = self(x)\r\n loss = self.loss_func(yhat, y)\r\n result = pl.EvalResult(early_stop_on=loss)\r\n result.log('val_loss', loss, on_epoch=True, on_step=False)\r\n return result\r\n\r\nif __name__ == '__main__':\r\n from pytorch_lightning.loggers import TensorBoardLogger, MLFlowLogger\r\n mlf_logger = MLFlowLogger(\r\n experiment_name=f\"MyModel\",\r\n # tracking_uri=\"http://localhost:5000\"\r\n )\r\n trainer = pl.Trainer(\r\n min_epochs=5,\r\n max_epochs=50,\r\n early_stop_callback=True,\r\n logger=mlf_logger\r\n )\r\n model = MyModel()\r\n trainer.fit(model)\r\n```\r\n\r\n### Expected behavior\r\n\r\nWhen using the TrainResult and EvalResult, or manually handling metric logging using the `training_epoch_end` and `validation_epoch_end` callbacks. It should be possible to avoid the MLFlow logger from communicating with the server in each training loop. \r\nThis would make it feasible to implement the MLFlow when a remote server is used for experiment tracking.\r\n\r\n### Environment\r\n```\r\n* CUDA:\r\n\t- GPU:\r\n\t- available: False\r\n\t- version: None\r\n* Packages:\r\n\t- numpy: 1.18.2\r\n\t- pyTorch_debug: False\r\n\t- pyTorch_version: 1.6.0+cpu\r\n\t- pytorch-lightning: 0.9.0\r\n\t- tensorboard: 2.2.0\r\n\t- tqdm: 4.48.2\r\n* System:\r\n\t- OS: Linux\r\n\t- architecture:\r\n\t\t- 64bit\r\n\t\t-\r\n\t- processor: x86_64\r\n\t- python: 3.7.9\r\n\t- version: #1 SMP Tue May 26 11:42:35 UTC 2020\r\n```\r\n### Additional context\r\n\r\nWe host a MLFlow instance in AWS and would like to be able to track experiments without affecting the training speed. 
\r\nIt appears that in general the MLFlow logger is much less performant than the default Tensorboard Logger, but this would not be much of a problem if we could avoid calls to the logger during the training loop.\r\n\r\n### Solution\r\nI've done a bit of debugging in the codebase and have been able to isolate the cause in two places\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/d438ad8a8db3e76d3ed4e3c6bc9b91d6b3266b8e/pytorch_lightning/loggers/mlflow.py#L125-L129\r\nHere `self.experiment` is called regardless of whether `self._run_id` exists. If we add an `if not self._run_id` here we avoid calling `self._mlflow_client.get_experiment_by_name(self._experiment_name)` on each step.\r\nHowever we still call it each time we log metrics to MFlow, because of the property `self.experiment`.\r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/d438ad8a8db3e76d3ed4e3c6bc9b91d6b3266b8e/pytorch_lightning/loggers/mlflow.py#L100-L112\r\nHere if we store `expt` within the logger and only call `self._mlflow_client.get_experiment_by_name` when it does not exist, we eliminate all overhead, it runs as fast as fast as the tensorboard logger and all the mlflow logging appears to be working as expected.\r\n\r\nI'd be happy to raise a PR for this fix.\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMLflow\n------\n\"\"\"\nfrom argparse import Namespace\nfrom time import time\nfrom typing import Optional, Dict, Any, Union\n\ntry:\n import mlflow\n from mlflow.tracking import MlflowClient\n _MLFLOW_AVAILABLE = True\nexcept ModuleNotFoundError: # pragma: no-cover\n mlflow = None\n MlflowClient = None\n _MLFLOW_AVAILABLE = False\n\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nLOCAL_FILE_URI_PREFIX = \"file:\"\n\n\nclass MLFlowLogger(LightningLoggerBase):\n \"\"\"\n Log using `MLflow <https://mlflow.org>`_. Install it with pip:\n\n .. code-block:: bash\n\n pip install mlflow\n\n Example:\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import MLFlowLogger\n >>> mlf_logger = MLFlowLogger(\n ... experiment_name=\"default\",\n ... tracking_uri=\"file:./ml-runs\"\n ... )\n >>> trainer = Trainer(logger=mlf_logger)\n\n Use the logger anywhere in you :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:\n\n >>> from pytorch_lightning import LightningModule\n >>> class LitModel(LightningModule):\n ... def training_step(self, batch, batch_idx):\n ... # example\n ... self.logger.experiment.whatever_ml_flow_supports(...)\n ...\n ... def any_lightning_module_function_or_hook(self):\n ... 
self.logger.experiment.whatever_ml_flow_supports(...)\n\n Args:\n experiment_name: The name of the experiment\n tracking_uri: Address of local or remote tracking server.\n If not provided, defaults to `file:<save_dir>`.\n tags: A dictionary tags for the experiment.\n save_dir: A path to a local directory where the MLflow runs get saved.\n Defaults to `./mlflow` if `tracking_uri` is not provided.\n Has no effect if `tracking_uri` is provided.\n\n \"\"\"\n\n def __init__(self,\n experiment_name: str = 'default',\n tracking_uri: Optional[str] = None,\n tags: Optional[Dict[str, Any]] = None,\n save_dir: Optional[str] = './mlruns'):\n\n if not _MLFLOW_AVAILABLE:\n raise ImportError('You want to use `mlflow` logger which is not installed yet,'\n ' install it with `pip install mlflow`.')\n super().__init__()\n if not tracking_uri:\n tracking_uri = f'{LOCAL_FILE_URI_PREFIX}{save_dir}'\n\n self._experiment_name = experiment_name\n self._experiment_id = None\n self._tracking_uri = tracking_uri\n self._run_id = None\n self.tags = tags\n self._mlflow_client = MlflowClient(tracking_uri)\n\n @property\n @rank_zero_experiment\n def experiment(self) -> MlflowClient:\n r\"\"\"\n Actual MLflow object. To use MLflow features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_mlflow_function()\n\n \"\"\"\n expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)\n\n if expt:\n self._experiment_id = expt.experiment_id\n else:\n log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')\n self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)\n\n if not self._run_id:\n run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)\n self._run_id = run.info.run_id\n return self._mlflow_client\n\n @property\n def run_id(self):\n # create the experiment if it does not exist to get the run id\n _ = self.experiment\n return self._run_id\n\n @property\n def experiment_id(self):\n # create the experiment if it does not exist to get the experiment id\n _ = self.experiment\n return self._experiment_id\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n params = self._flatten_dict(params)\n for k, v in params.items():\n self.experiment.log_param(self.run_id, k, v)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'\n\n timestamp_ms = int(time() * 1000)\n for k, v in metrics.items():\n if isinstance(v, str):\n log.warning(f'Discarding metric with string value {k}={v}.')\n continue\n self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step)\n\n @rank_zero_only\n def finalize(self, status: str = 'FINISHED') -> None:\n super().finalize(status)\n status = 'FINISHED' if status == 'success' else status\n if self.experiment.get_run(self.run_id):\n self.experiment.set_terminated(self.run_id, status)\n\n @property\n def save_dir(self) -> Optional[str]:\n \"\"\"\n The root file directory in which MLflow experiments are saved.\n\n Return:\n Local path to the root experiment directory if the tracking uri is local.\n Otherwhise returns `None`.\n \"\"\"\n if self._tracking_uri.startswith(LOCAL_FILE_URI_PREFIX):\n return self._tracking_uri.lstrip(LOCAL_FILE_URI_PREFIX)\n\n @property\n def name(self) -> str:\n return 
self.experiment_id\n\n @property\n def version(self) -> str:\n return self.run_id\n", "path": "pytorch_lightning/loggers/mlflow.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMLflow\n------\n\"\"\"\nfrom argparse import Namespace\nfrom time import time\nfrom typing import Optional, Dict, Any, Union\n\ntry:\n import mlflow\n from mlflow.tracking import MlflowClient\n _MLFLOW_AVAILABLE = True\nexcept ModuleNotFoundError: # pragma: no-cover\n mlflow = None\n MlflowClient = None\n _MLFLOW_AVAILABLE = False\n\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nLOCAL_FILE_URI_PREFIX = \"file:\"\n\n\nclass MLFlowLogger(LightningLoggerBase):\n \"\"\"\n Log using `MLflow <https://mlflow.org>`_. Install it with pip:\n\n .. code-block:: bash\n\n pip install mlflow\n\n Example:\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import MLFlowLogger\n >>> mlf_logger = MLFlowLogger(\n ... experiment_name=\"default\",\n ... tracking_uri=\"file:./ml-runs\"\n ... )\n >>> trainer = Trainer(logger=mlf_logger)\n\n Use the logger anywhere in you :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:\n\n >>> from pytorch_lightning import LightningModule\n >>> class LitModel(LightningModule):\n ... def training_step(self, batch, batch_idx):\n ... # example\n ... self.logger.experiment.whatever_ml_flow_supports(...)\n ...\n ... def any_lightning_module_function_or_hook(self):\n ... self.logger.experiment.whatever_ml_flow_supports(...)\n\n Args:\n experiment_name: The name of the experiment\n tracking_uri: Address of local or remote tracking server.\n If not provided, defaults to `file:<save_dir>`.\n tags: A dictionary tags for the experiment.\n save_dir: A path to a local directory where the MLflow runs get saved.\n Defaults to `./mlflow` if `tracking_uri` is not provided.\n Has no effect if `tracking_uri` is provided.\n\n \"\"\"\n\n def __init__(self,\n experiment_name: str = 'default',\n tracking_uri: Optional[str] = None,\n tags: Optional[Dict[str, Any]] = None,\n save_dir: Optional[str] = './mlruns'):\n\n if not _MLFLOW_AVAILABLE:\n raise ImportError('You want to use `mlflow` logger which is not installed yet,'\n ' install it with `pip install mlflow`.')\n super().__init__()\n if not tracking_uri:\n tracking_uri = f'{LOCAL_FILE_URI_PREFIX}{save_dir}'\n\n self._experiment_name = experiment_name\n self._experiment_id = None\n self._tracking_uri = tracking_uri\n self._run_id = None\n self.tags = tags\n self._mlflow_client = MlflowClient(tracking_uri)\n\n @property\n @rank_zero_experiment\n def experiment(self) -> MlflowClient:\n r\"\"\"\n Actual MLflow object. 
To use MLflow features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_mlflow_function()\n\n \"\"\"\n if self._experiment_id is None:\n expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)\n if expt is not None:\n self._experiment_id = expt.experiment_id\n else:\n log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')\n self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)\n\n if self._run_id is None:\n run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)\n self._run_id = run.info.run_id\n return self._mlflow_client\n\n @property\n def run_id(self):\n # create the experiment if it does not exist to get the run id\n _ = self.experiment\n return self._run_id\n\n @property\n def experiment_id(self):\n # create the experiment if it does not exist to get the experiment id\n _ = self.experiment\n return self._experiment_id\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n params = self._flatten_dict(params)\n for k, v in params.items():\n self.experiment.log_param(self.run_id, k, v)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'\n\n timestamp_ms = int(time() * 1000)\n for k, v in metrics.items():\n if isinstance(v, str):\n log.warning(f'Discarding metric with string value {k}={v}.')\n continue\n self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step)\n\n @rank_zero_only\n def finalize(self, status: str = 'FINISHED') -> None:\n super().finalize(status)\n status = 'FINISHED' if status == 'success' else status\n if self.experiment.get_run(self.run_id):\n self.experiment.set_terminated(self.run_id, status)\n\n @property\n def save_dir(self) -> Optional[str]:\n \"\"\"\n The root file directory in which MLflow experiments are saved.\n\n Return:\n Local path to the root experiment directory if the tracking uri is local.\n Otherwhise returns `None`.\n \"\"\"\n if self._tracking_uri.startswith(LOCAL_FILE_URI_PREFIX):\n return self._tracking_uri.lstrip(LOCAL_FILE_URI_PREFIX)\n\n @property\n def name(self) -> str:\n return self.experiment_id\n\n @property\n def version(self) -> str:\n return self.run_id\n", "path": "pytorch_lightning/loggers/mlflow.py"}]}
| 3,509 | 319 |
gh_patches_debug_7159
|
rasdani/github-patches
|
git_diff
|
google__timesketch-2243
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gcs-importer fails to read username when importing Plaso db from GCS
**Describe the bug**
When the gcs-importer attempts to import a plaso file from GCS, it fails to read the username from the "requester" field in the .plaso.metadata.json file. This means the import fails.
Jul 14 07:51:58 timesketch-server python3[14864]: 2022-07-14 07:51:58,892 [INFO] Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso.metadata.json
Jul 14 07:51:58 timesketch-server python3[14864]: 2022-07-14 07:51:58 INFO Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso.metadata.json
Jul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59,923 [INFO] Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso
Jul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59 INFO Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso
Jul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59,926 [ERROR] Missing username
Jul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59 ERROR Missing username
Since [this commit](https://github.com/google/turbinia/commit/125696c8c6bd784af435316a020a38fd608dd5e5) in Turbinia, the "requester" field is under "globals" in the .plaso.metadata.json file, while Timesketch attempts to [read it directly from the root level](https://github.com/google/timesketch/blob/e33056e17a10e961dcd4b897c2cd66a1fc12ebae/contrib/gcs_importer.py#L171)
**To Reproduce**
Steps to reproduce the behavior:
1. Install latest Turbinia
2. Install latest Timesketch
3. Set up GCS importer
4. Analyze something with Turbinia and have it drop results to GCS
5. Observe GCS importer fail.
**Expected behavior**
The plaso database is correctly imported and indexed.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/gcs_importer.py`
Content:
```
1 # Copyright 2020 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Google Cloud Storage importer."""
15
16 import argparse
17 import time
18 import os
19 import sys
20 import uuid
21 import json
22 import logging
23
24 from werkzeug.exceptions import Forbidden
25
26 from timesketch.app import create_app
27 from timesketch.lib import tasks
28 from timesketch.models import db_session
29 from timesketch.models.sketch import SearchIndex
30 from timesketch.models.sketch import Sketch
31 from timesketch.models.sketch import Timeline
32 from timesketch.models.user import User
33
34 try:
35 from google.cloud import pubsub_v1
36 from google.cloud import storage
37 except ImportError:
38 sys.exit("ERROR: You are missing Google Cloud libraries")
39
40 # Create logger
41 logger = logging.getLogger("gcs_importer")
42 logger.setLevel(logging.DEBUG)
43 handler = logging.StreamHandler()
44 handler.setLevel(logging.DEBUG)
45 formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
46 handler.setFormatter(formatter)
47 logger.addHandler(handler)
48
49
50 def download_from_gcs(gcs_base_path, filename):
51 """Download file from Google Cloud Storage (GCS).
52
53 Args:
54 gcs_base_path: (str) GCS bucket path
55 filename: (str) Filename of the file to download
56
57 Returns:
58 (str) Path to downloaded file
59 """
60 storage_client = storage.Client(args.project)
61 bucket = storage_client.get_bucket(args.bucket)
62 gcs_full_path = os.path.join(gcs_base_path, filename)
63 local_path = os.path.join(args.output, filename)
64 blob = bucket.blob(gcs_full_path)
65 blob.download_to_filename(local_path)
66 logger.info("Downloaded file from GCS: {}".format(local_path))
67 return local_path
68
69
70 def setup_sketch(timeline_name, index_name, username, sketch_id=None):
71 """Use existing sketch or create a new sketch.
72
73 Args:
74 timeline_name: (str) Name of the Timeline
75 index_name: (str) Name of the index
76 username: (str) Who should own the timeline
77 sketch_id: (str) Optional sketch_id to add timeline to
78
79 Returns:
80 (tuple) sketch ID and timeline ID as integers
81 """
82 with app.app_context():
83 user = User.get_or_create(username=username)
84 sketch = None
85
86 if sketch_id:
87 try:
88 sketch = Sketch.query.get_with_acl(sketch_id, user=user)
89 logger.info(
90 "Using existing sketch: {} ({})".format(sketch.name, sketch.id)
91 )
92 except Forbidden:
93 pass
94
95 if not (sketch or sketch_id):
96 # Create a new sketch.
97 sketch_name = "Turbinia: {}".format(timeline_name)
98 sketch = Sketch(name=sketch_name, description=sketch_name, user=user)
99 # Need to commit here to be able to set permissions later.
100 db_session.add(sketch)
101 db_session.commit()
102 sketch.grant_permission(permission="read", user=user)
103 sketch.grant_permission(permission="write", user=user)
104 sketch.grant_permission(permission="delete", user=user)
105 sketch.status.append(sketch.Status(user=None, status="new"))
106 db_session.add(sketch)
107 db_session.commit()
108 logger.info("Created new sketch: {} ({})".format(sketch.name, sketch.id))
109
110 searchindex = SearchIndex.get_or_create(
111 name=timeline_name,
112 description="Created by Turbinia.",
113 user=user,
114 index_name=index_name,
115 )
116 searchindex.grant_permission(permission="read", user=user)
117 searchindex.grant_permission(permission="write", user=user)
118 searchindex.grant_permission(permission="delete", user=user)
119 searchindex.set_status("processing")
120 db_session.add(searchindex)
121 db_session.commit()
122
123 timeline = Timeline(
124 name=searchindex.name,
125 description=searchindex.description,
126 sketch=sketch,
127 user=user,
128 searchindex=searchindex,
129 )
130
131 # If the user doesn't have write access to the sketch then create the
132 # timeline but don't attach it to the sketch.
133 if not sketch.has_permission(user, "write"):
134 timeline.sketch = None
135 else:
136 sketch.timelines.append(timeline)
137
138 db_session.add(timeline)
139 db_session.commit()
140 timeline.set_status("processing")
141
142 return sketch.id, timeline.id
143
144
145 def callback(message):
146 """Google PubSub callback.
147
148 This function is called on all incoming messages on the configured topic.
149
150 Args:
151 message: (dict) PubSub message
152 """
153 message.ack()
154 gcs_full_path = message.attributes.get("objectId")
155
156 # Exit early if the file type is wrong.
157 if not gcs_full_path.endswith(".plaso.metadata.json"):
158 return
159
160 gcs_base_path = os.path.dirname(gcs_full_path)
161 gcs_metadata_filename = os.path.basename(gcs_full_path)
162 gcs_base_filename = gcs_metadata_filename.replace(".metadata.json", "")
163 gcs_plaso_filename = gcs_base_filename
164
165 # Download files from GCS
166 local_metadata_file = download_from_gcs(gcs_base_path, gcs_metadata_filename)
167 local_plaso_file = download_from_gcs(gcs_base_path, gcs_plaso_filename)
168
169 with open(local_metadata_file, "r") as metadata_file:
170 metadata = json.load(metadata_file)
171 username = metadata.get("requester")
172 sketch_id_from_metadata = metadata.get("sketch_id")
173
174 if not username:
175 logger.error("Missing username")
176 return
177
178 timeline_name = os.path.splitext(gcs_plaso_filename)[0]
179 index_name = uuid.uuid4().hex
180 sketch_id, timeline_id = setup_sketch(
181 timeline_name, index_name, "admin", sketch_id_from_metadata
182 )
183
184 # Start indexing
185 with app.app_context():
186 pipeline = tasks.build_index_pipeline(
187 file_path=local_plaso_file,
188 timeline_name=gcs_base_filename,
189 index_name=index_name,
190 file_extension="plaso",
191 sketch_id=sketch_id,
192 timeline_id=timeline_id,
193 )
194 pipeline.apply_async()
195 logger.info("File sent for indexing: {}".format(gcs_base_filename))
196
197
198 if __name__ == "__main__":
199 parser = argparse.ArgumentParser(description="GCS importer")
200 parser.add_argument("--project", help="Google Cloud Project ID")
201 parser.add_argument("--bucket", help="Google Cloud Storage bucket to monitor")
202 parser.add_argument("--subscription", help="Google Cloud PubSub subscription")
203 parser.add_argument("--output", default="/tmp", help="Directory for downloads")
204 args = parser.parse_args()
205
206 # Create flask app
207 app = create_app()
208
209 # Setup Google Cloud Pub/Sub
210 subscriber = pubsub_v1.SubscriberClient()
211 subscription_path = subscriber.subscription_path(args.project, args.subscription)
212 subscriber.subscribe(subscription_path, callback=callback)
213
214 logger.info("Listening on PubSub queue: {}".format(args.subscription))
215 while True:
216 time.sleep(10)
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/contrib/gcs_importer.py b/contrib/gcs_importer.py
--- a/contrib/gcs_importer.py
+++ b/contrib/gcs_importer.py
@@ -168,7 +168,10 @@
with open(local_metadata_file, "r") as metadata_file:
metadata = json.load(metadata_file)
- username = metadata.get("requester")
+ username = metadata.get("globals", {}).get("requester")
+ if not username:
+ # Backwards compatibility for old Turbinia versions.
+ username = metadata.get("requester")
sketch_id_from_metadata = metadata.get("sketch_id")
if not username:
|
{"golden_diff": "diff --git a/contrib/gcs_importer.py b/contrib/gcs_importer.py\n--- a/contrib/gcs_importer.py\n+++ b/contrib/gcs_importer.py\n@@ -168,7 +168,10 @@\n \n with open(local_metadata_file, \"r\") as metadata_file:\n metadata = json.load(metadata_file)\n- username = metadata.get(\"requester\")\n+ username = metadata.get(\"globals\", {}).get(\"requester\")\n+ if not username:\n+ # Backwards compatibility for old Turbinia versions.\n+ username = metadata.get(\"requester\")\n sketch_id_from_metadata = metadata.get(\"sketch_id\")\n \n if not username:\n", "issue": "gcs-importer fails to read username when importing Plaso db from GCS\n**Describe the bug**\r\nWhen the gcs-importer attempts to import a plaso file from GCS, it fails to read the username from the \"requester\" field in the .plaso.metadata.json file. This means the import fails.\r\n\r\nJul 14 07:51:58 timesketch-server python3[14864]: 2022-07-14 07:51:58,892 [INFO] Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso.metadata.json\r\nJul 14 07:51:58 timesketch-server python3[14864]: 2022-07-14 07:51:58 INFO Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso.metadata.json\r\nJul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59,923 [INFO] Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso\r\nJul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59 INFO Downloaded file from GCS: /tmp/381929d791d04ec0a12f85454cf08196.plaso\r\nJul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59,926 [ERROR] Missing username\r\nJul 14 07:51:59 timesketch-server python3[14864]: 2022-07-14 07:51:59 ERROR Missing username\r\n\r\nSince [this commit](https://github.com/google/turbinia/commit/125696c8c6bd784af435316a020a38fd608dd5e5) in Turbinia, the \"requester\" field is under \"globals\" in the .plaso.metadata.json file, while Timesketch attempts to [read it directly from the root level](https://github.com/google/timesketch/blob/e33056e17a10e961dcd4b897c2cd66a1fc12ebae/contrib/gcs_importer.py#L171)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install latest Turbinia\r\n2. Install latest Timesketch\r\n3. Set up GCS importer\r\n4. Analyze something with Turbinia and have it drop results to GCS\r\n5. Observe GCS importer fail.\r\n\r\n**Expected behavior**\r\nThe plaso database is correctly imported and indexed. \r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 2020 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Google Cloud Storage importer.\"\"\"\n\nimport argparse\nimport time\nimport os\nimport sys\nimport uuid\nimport json\nimport logging\n\nfrom werkzeug.exceptions import Forbidden\n\nfrom timesketch.app import create_app\nfrom timesketch.lib import tasks\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\nfrom timesketch.models.sketch import Sketch\nfrom timesketch.models.sketch import Timeline\nfrom timesketch.models.user import User\n\ntry:\n from google.cloud import pubsub_v1\n from google.cloud import storage\nexcept ImportError:\n sys.exit(\"ERROR: You are missing Google Cloud libraries\")\n\n# Create logger\nlogger = logging.getLogger(\"gcs_importer\")\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef download_from_gcs(gcs_base_path, filename):\n \"\"\"Download file from Google Cloud Storage (GCS).\n\n Args:\n gcs_base_path: (str) GCS bucket path\n filename: (str) Filename of the file to download\n\n Returns:\n (str) Path to downloaded file\n \"\"\"\n storage_client = storage.Client(args.project)\n bucket = storage_client.get_bucket(args.bucket)\n gcs_full_path = os.path.join(gcs_base_path, filename)\n local_path = os.path.join(args.output, filename)\n blob = bucket.blob(gcs_full_path)\n blob.download_to_filename(local_path)\n logger.info(\"Downloaded file from GCS: {}\".format(local_path))\n return local_path\n\n\ndef setup_sketch(timeline_name, index_name, username, sketch_id=None):\n \"\"\"Use existing sketch or create a new sketch.\n\n Args:\n timeline_name: (str) Name of the Timeline\n index_name: (str) Name of the index\n username: (str) Who should own the timeline\n sketch_id: (str) Optional sketch_id to add timeline to\n\n Returns:\n (tuple) sketch ID and timeline ID as integers\n \"\"\"\n with app.app_context():\n user = User.get_or_create(username=username)\n sketch = None\n\n if sketch_id:\n try:\n sketch = Sketch.query.get_with_acl(sketch_id, user=user)\n logger.info(\n \"Using existing sketch: {} ({})\".format(sketch.name, sketch.id)\n )\n except Forbidden:\n pass\n\n if not (sketch or sketch_id):\n # Create a new sketch.\n sketch_name = \"Turbinia: {}\".format(timeline_name)\n sketch = Sketch(name=sketch_name, description=sketch_name, user=user)\n # Need to commit here to be able to set permissions later.\n db_session.add(sketch)\n db_session.commit()\n sketch.grant_permission(permission=\"read\", user=user)\n sketch.grant_permission(permission=\"write\", user=user)\n sketch.grant_permission(permission=\"delete\", user=user)\n sketch.status.append(sketch.Status(user=None, status=\"new\"))\n db_session.add(sketch)\n db_session.commit()\n logger.info(\"Created new sketch: {} ({})\".format(sketch.name, sketch.id))\n\n searchindex = SearchIndex.get_or_create(\n 
name=timeline_name,\n description=\"Created by Turbinia.\",\n user=user,\n index_name=index_name,\n )\n searchindex.grant_permission(permission=\"read\", user=user)\n searchindex.grant_permission(permission=\"write\", user=user)\n searchindex.grant_permission(permission=\"delete\", user=user)\n searchindex.set_status(\"processing\")\n db_session.add(searchindex)\n db_session.commit()\n\n timeline = Timeline(\n name=searchindex.name,\n description=searchindex.description,\n sketch=sketch,\n user=user,\n searchindex=searchindex,\n )\n\n # If the user doesn't have write access to the sketch then create the\n # timeline but don't attach it to the sketch.\n if not sketch.has_permission(user, \"write\"):\n timeline.sketch = None\n else:\n sketch.timelines.append(timeline)\n\n db_session.add(timeline)\n db_session.commit()\n timeline.set_status(\"processing\")\n\n return sketch.id, timeline.id\n\n\ndef callback(message):\n \"\"\"Google PubSub callback.\n\n This function is called on all incoming messages on the configured topic.\n\n Args:\n message: (dict) PubSub message\n \"\"\"\n message.ack()\n gcs_full_path = message.attributes.get(\"objectId\")\n\n # Exit early if the file type is wrong.\n if not gcs_full_path.endswith(\".plaso.metadata.json\"):\n return\n\n gcs_base_path = os.path.dirname(gcs_full_path)\n gcs_metadata_filename = os.path.basename(gcs_full_path)\n gcs_base_filename = gcs_metadata_filename.replace(\".metadata.json\", \"\")\n gcs_plaso_filename = gcs_base_filename\n\n # Download files from GCS\n local_metadata_file = download_from_gcs(gcs_base_path, gcs_metadata_filename)\n local_plaso_file = download_from_gcs(gcs_base_path, gcs_plaso_filename)\n\n with open(local_metadata_file, \"r\") as metadata_file:\n metadata = json.load(metadata_file)\n username = metadata.get(\"requester\")\n sketch_id_from_metadata = metadata.get(\"sketch_id\")\n\n if not username:\n logger.error(\"Missing username\")\n return\n\n timeline_name = os.path.splitext(gcs_plaso_filename)[0]\n index_name = uuid.uuid4().hex\n sketch_id, timeline_id = setup_sketch(\n timeline_name, index_name, \"admin\", sketch_id_from_metadata\n )\n\n # Start indexing\n with app.app_context():\n pipeline = tasks.build_index_pipeline(\n file_path=local_plaso_file,\n timeline_name=gcs_base_filename,\n index_name=index_name,\n file_extension=\"plaso\",\n sketch_id=sketch_id,\n timeline_id=timeline_id,\n )\n pipeline.apply_async()\n logger.info(\"File sent for indexing: {}\".format(gcs_base_filename))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"GCS importer\")\n parser.add_argument(\"--project\", help=\"Google Cloud Project ID\")\n parser.add_argument(\"--bucket\", help=\"Google Cloud Storage bucket to monitor\")\n parser.add_argument(\"--subscription\", help=\"Google Cloud PubSub subscription\")\n parser.add_argument(\"--output\", default=\"/tmp\", help=\"Directory for downloads\")\n args = parser.parse_args()\n\n # Create flask app\n app = create_app()\n\n # Setup Google Cloud Pub/Sub\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(args.project, args.subscription)\n subscriber.subscribe(subscription_path, callback=callback)\n\n logger.info(\"Listening on PubSub queue: {}\".format(args.subscription))\n while True:\n time.sleep(10)\n", "path": "contrib/gcs_importer.py"}], "after_files": [{"content": "# Copyright 2020 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Google Cloud Storage importer.\"\"\"\n\nimport argparse\nimport time\nimport os\nimport sys\nimport uuid\nimport json\nimport logging\n\nfrom werkzeug.exceptions import Forbidden\n\nfrom timesketch.app import create_app\nfrom timesketch.lib import tasks\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\nfrom timesketch.models.sketch import Sketch\nfrom timesketch.models.sketch import Timeline\nfrom timesketch.models.user import User\n\ntry:\n from google.cloud import pubsub_v1\n from google.cloud import storage\nexcept ImportError:\n sys.exit(\"ERROR: You are missing Google Cloud libraries\")\n\n# Create logger\nlogger = logging.getLogger(\"gcs_importer\")\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef download_from_gcs(gcs_base_path, filename):\n \"\"\"Download file from Google Cloud Storage (GCS).\n\n Args:\n gcs_base_path: (str) GCS bucket path\n filename: (str) Filename of the file to download\n\n Returns:\n (str) Path to downloaded file\n \"\"\"\n storage_client = storage.Client(args.project)\n bucket = storage_client.get_bucket(args.bucket)\n gcs_full_path = os.path.join(gcs_base_path, filename)\n local_path = os.path.join(args.output, filename)\n blob = bucket.blob(gcs_full_path)\n blob.download_to_filename(local_path)\n logger.info(\"Downloaded file from GCS: {}\".format(local_path))\n return local_path\n\n\ndef setup_sketch(timeline_name, index_name, username, sketch_id=None):\n \"\"\"Use existing sketch or create a new sketch.\n\n Args:\n timeline_name: (str) Name of the Timeline\n index_name: (str) Name of the index\n username: (str) Who should own the timeline\n sketch_id: (str) Optional sketch_id to add timeline to\n\n Returns:\n (tuple) sketch ID and timeline ID as integers\n \"\"\"\n with app.app_context():\n user = User.get_or_create(username=username)\n sketch = None\n\n if sketch_id:\n try:\n sketch = Sketch.query.get_with_acl(sketch_id, user=user)\n logger.info(\n \"Using existing sketch: {} ({})\".format(sketch.name, sketch.id)\n )\n except Forbidden:\n pass\n\n if not (sketch or sketch_id):\n # Create a new sketch.\n sketch_name = \"Turbinia: {}\".format(timeline_name)\n sketch = Sketch(name=sketch_name, description=sketch_name, user=user)\n # Need to commit here to be able to set permissions later.\n db_session.add(sketch)\n db_session.commit()\n sketch.grant_permission(permission=\"read\", user=user)\n sketch.grant_permission(permission=\"write\", user=user)\n sketch.grant_permission(permission=\"delete\", user=user)\n sketch.status.append(sketch.Status(user=None, status=\"new\"))\n db_session.add(sketch)\n db_session.commit()\n logger.info(\"Created new sketch: {} ({})\".format(sketch.name, sketch.id))\n\n searchindex = SearchIndex.get_or_create(\n 
name=timeline_name,\n description=\"Created by Turbinia.\",\n user=user,\n index_name=index_name,\n )\n searchindex.grant_permission(permission=\"read\", user=user)\n searchindex.grant_permission(permission=\"write\", user=user)\n searchindex.grant_permission(permission=\"delete\", user=user)\n searchindex.set_status(\"processing\")\n db_session.add(searchindex)\n db_session.commit()\n\n timeline = Timeline(\n name=searchindex.name,\n description=searchindex.description,\n sketch=sketch,\n user=user,\n searchindex=searchindex,\n )\n\n # If the user doesn't have write access to the sketch then create the\n # timeline but don't attach it to the sketch.\n if not sketch.has_permission(user, \"write\"):\n timeline.sketch = None\n else:\n sketch.timelines.append(timeline)\n\n db_session.add(timeline)\n db_session.commit()\n timeline.set_status(\"processing\")\n\n return sketch.id, timeline.id\n\n\ndef callback(message):\n \"\"\"Google PubSub callback.\n\n This function is called on all incoming messages on the configured topic.\n\n Args:\n message: (dict) PubSub message\n \"\"\"\n message.ack()\n gcs_full_path = message.attributes.get(\"objectId\")\n\n # Exit early if the file type is wrong.\n if not gcs_full_path.endswith(\".plaso.metadata.json\"):\n return\n\n gcs_base_path = os.path.dirname(gcs_full_path)\n gcs_metadata_filename = os.path.basename(gcs_full_path)\n gcs_base_filename = gcs_metadata_filename.replace(\".metadata.json\", \"\")\n gcs_plaso_filename = gcs_base_filename\n\n # Download files from GCS\n local_metadata_file = download_from_gcs(gcs_base_path, gcs_metadata_filename)\n local_plaso_file = download_from_gcs(gcs_base_path, gcs_plaso_filename)\n\n with open(local_metadata_file, \"r\") as metadata_file:\n metadata = json.load(metadata_file)\n username = metadata.get(\"globals\", {}).get(\"requester\")\n if not username:\n # Backwards compatibility for old Turbinia versions.\n username = metadata.get(\"requester\")\n sketch_id_from_metadata = metadata.get(\"sketch_id\")\n\n if not username:\n logger.error(\"Missing username\")\n return\n\n timeline_name = os.path.splitext(gcs_plaso_filename)[0]\n index_name = uuid.uuid4().hex\n sketch_id, timeline_id = setup_sketch(\n timeline_name, index_name, \"admin\", sketch_id_from_metadata\n )\n\n # Start indexing\n with app.app_context():\n pipeline = tasks.build_index_pipeline(\n file_path=local_plaso_file,\n timeline_name=gcs_base_filename,\n index_name=index_name,\n file_extension=\"plaso\",\n sketch_id=sketch_id,\n timeline_id=timeline_id,\n )\n pipeline.apply_async()\n logger.info(\"File sent for indexing: {}\".format(gcs_base_filename))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"GCS importer\")\n parser.add_argument(\"--project\", help=\"Google Cloud Project ID\")\n parser.add_argument(\"--bucket\", help=\"Google Cloud Storage bucket to monitor\")\n parser.add_argument(\"--subscription\", help=\"Google Cloud PubSub subscription\")\n parser.add_argument(\"--output\", default=\"/tmp\", help=\"Directory for downloads\")\n args = parser.parse_args()\n\n # Create flask app\n app = create_app()\n\n # Setup Google Cloud Pub/Sub\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(args.project, args.subscription)\n subscriber.subscribe(subscription_path, callback=callback)\n\n logger.info(\"Listening on PubSub queue: {}\".format(args.subscription))\n while True:\n time.sleep(10)\n", "path": "contrib/gcs_importer.py"}]}
| 3,274 | 151 |
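
The patch above resolves the "Missing username" error by reading `requester` from the nested `globals` section first (the newer Turbinia metadata layout) and falling back to the top-level key for older files. A small self-contained sketch of that lookup order follows; the metadata contents and the `analyst1` value are made up for illustration.

```python
import json

# Hypothetical examples of the two .plaso.metadata.json layouts; the
# field values are invented for illustration only.
new_style = json.loads('{"globals": {"requester": "analyst1"}, "sketch_id": 42}')
old_style = json.loads('{"requester": "analyst1", "sketch_id": 42}')


def get_requester(metadata):
    # Mirror of the lookup order in the golden diff: try the nested
    # "globals" section first, then the legacy top-level key.
    username = metadata.get("globals", {}).get("requester")
    if not username:
        username = metadata.get("requester")
    return username


assert get_requester(new_style) == "analyst1"
assert get_requester(old_style) == "analyst1"
print(get_requester(new_style), get_requester(old_style))
```
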
gh_patches_debug_21746
|
rasdani/github-patches
|
git_diff
|
explosion__spaCy-3389
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feature request] Factory default for extension attributes
## Feature description
As usual with Python, mutable defaults are a big no-no for extension attributes, since they are shared by all instances, which leads to subtle and ~~quick to anger~~ hard to root out bugs (see e.g. #2581).
The documentation mentions that pitfall, but doesn't offer a convenient solution: if I want to keep a static list of interesting spans in my document in a `Doc._.interesting`, it is not clear where the getter and setter that I am supposed to use for this property should store the state. (From what I understand, it should probably be somewhere in `Doc.user_data`, but I have not found a lot of doc on that either)
I propose a `factory` argument to `set_extension` that would be called the first time that the value for the corresponding extension property is retrieved for a given instance (as `collections.defaultdict` does), so one could just write
```python
spacy.tokens.Doc.set_extension('interesting', factory=list)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spacy/tokens/underscore.py`
Content:
```
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 import functools
5
6 from ..errors import Errors
7
8
9 class Underscore(object):
10 doc_extensions = {}
11 span_extensions = {}
12 token_extensions = {}
13
14 def __init__(self, extensions, obj, start=None, end=None):
15 object.__setattr__(self, "_extensions", extensions)
16 object.__setattr__(self, "_obj", obj)
17 # Assumption is that for doc values, _start and _end will both be None
18 # Span will set non-None values for _start and _end
19 # Token will have _start be non-None, _end be None
20 # This lets us key everything into the doc.user_data dictionary,
21 # (see _get_key), and lets us use a single Underscore class.
22 object.__setattr__(self, "_doc", obj.doc)
23 object.__setattr__(self, "_start", start)
24 object.__setattr__(self, "_end", end)
25
26 def __getattr__(self, name):
27 if name not in self._extensions:
28 raise AttributeError(Errors.E046.format(name=name))
29 default, method, getter, setter = self._extensions[name]
30 if getter is not None:
31 return getter(self._obj)
32 elif method is not None:
33 return functools.partial(method, self._obj)
34 else:
35 return self._doc.user_data.get(self._get_key(name), default)
36
37 def __setattr__(self, name, value):
38 if name not in self._extensions:
39 raise AttributeError(Errors.E047.format(name=name))
40 default, method, getter, setter = self._extensions[name]
41 if setter is not None:
42 return setter(self._obj, value)
43 else:
44 self._doc.user_data[self._get_key(name)] = value
45
46 def set(self, name, value):
47 return self.__setattr__(name, value)
48
49 def get(self, name):
50 return self.__getattr__(name)
51
52 def has(self, name):
53 return name in self._extensions
54
55 def _get_key(self, name):
56 return ("._.", name, self._start, self._end)
57
58
59 def get_ext_args(**kwargs):
60 """Validate and convert arguments. Reused in Doc, Token and Span."""
61 default = kwargs.get("default")
62 getter = kwargs.get("getter")
63 setter = kwargs.get("setter")
64 method = kwargs.get("method")
65 if getter is None and setter is not None:
66 raise ValueError(Errors.E089)
67 valid_opts = ("default" in kwargs, method is not None, getter is not None)
68 nr_defined = sum(t is True for t in valid_opts)
69 if nr_defined != 1:
70 raise ValueError(Errors.E083.format(nr_defined=nr_defined))
71 if setter is not None and not hasattr(setter, "__call__"):
72 raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
73 if getter is not None and not hasattr(getter, "__call__"):
74 raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
75 if method is not None and not hasattr(method, "__call__"):
76 raise ValueError(Errors.E091.format(name="method", value=repr(method)))
77 return (default, method, getter, setter)
78
79
80 def is_writable_attr(ext):
81 """Check if an extension attribute is writable.
82 ext (tuple): The (default, getter, setter, method) tuple available via
83 {Doc,Span,Token}.get_extension.
84 RETURNS (bool): Whether the attribute is writable.
85 """
86 default, method, getter, setter = ext
87 # Extension is writable if it has a setter (getter + setter), if it has a
88 # default value (or, if its default value is none, none of the other values
89 # should be set).
90 if setter is not None or default is not None or all(e is None for e in ext):
91 return True
92 return False
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py
--- a/spacy/tokens/underscore.py
+++ b/spacy/tokens/underscore.py
@@ -2,11 +2,13 @@
from __future__ import unicode_literals
import functools
+import copy
from ..errors import Errors
class Underscore(object):
+ mutable_types = (dict, list, set)
doc_extensions = {}
span_extensions = {}
token_extensions = {}
@@ -32,7 +34,15 @@
elif method is not None:
return functools.partial(method, self._obj)
else:
- return self._doc.user_data.get(self._get_key(name), default)
+ key = self._get_key(name)
+ if key in self._doc.user_data:
+ return self._doc.user_data[key]
+ elif isinstance(default, self.mutable_types):
+ # Handle mutable default arguments (see #2581)
+ new_default = copy.copy(default)
+ self.__setattr__(name, new_default)
+ return new_default
+ return default
def __setattr__(self, name, value):
if name not in self._extensions:
|
{"golden_diff": "diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py\n--- a/spacy/tokens/underscore.py\n+++ b/spacy/tokens/underscore.py\n@@ -2,11 +2,13 @@\n from __future__ import unicode_literals\n \n import functools\n+import copy\n \n from ..errors import Errors\n \n \n class Underscore(object):\n+ mutable_types = (dict, list, set)\n doc_extensions = {}\n span_extensions = {}\n token_extensions = {}\n@@ -32,7 +34,15 @@\n elif method is not None:\n return functools.partial(method, self._obj)\n else:\n- return self._doc.user_data.get(self._get_key(name), default)\n+ key = self._get_key(name)\n+ if key in self._doc.user_data:\n+ return self._doc.user_data[key]\n+ elif isinstance(default, self.mutable_types):\n+ # Handle mutable default arguments (see #2581)\n+ new_default = copy.copy(default)\n+ self.__setattr__(name, new_default)\n+ return new_default\n+ return default\n \n def __setattr__(self, name, value):\n if name not in self._extensions:\n", "issue": "[feature request] Factory default for extension attributes\n## Feature description\r\nAs usual with Python, mutable defaults are a big no-no for extension attributes, since they are shared by all instances, which leads to subtle and ~~quick to anger~~ hard to root out bugs (see e.g. #2581).\r\nThe documentation mentions that pitfall, but doesn't offer a convenient solution: if I want to keep a static list of interesting spans in my document in a `Doc._.interesting`, it is not clear where the getter and setter that I am supposed to use for this property should store the state. (From what I understand, it should probably be somewhere in `Doc.user_data`, but I have not found a lot of doc on that either)\r\n\r\nI propose a `factory` argument to `set_extension` that would be called the first time that the value for the corresponding extension property is retrieved for a given instance (as `collections.defaultdict` does), so one could just write\r\n\r\n```python\r\nspacy.tokens.Doc.set_extension('interesting', factory=list)\r\n```\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport functools\n\nfrom ..errors import Errors\n\n\nclass Underscore(object):\n doc_extensions = {}\n span_extensions = {}\n token_extensions = {}\n\n def __init__(self, extensions, obj, start=None, end=None):\n object.__setattr__(self, \"_extensions\", extensions)\n object.__setattr__(self, \"_obj\", obj)\n # Assumption is that for doc values, _start and _end will both be None\n # Span will set non-None values for _start and _end\n # Token will have _start be non-None, _end be None\n # This lets us key everything into the doc.user_data dictionary,\n # (see _get_key), and lets us use a single Underscore class.\n object.__setattr__(self, \"_doc\", obj.doc)\n object.__setattr__(self, \"_start\", start)\n object.__setattr__(self, \"_end\", end)\n\n def __getattr__(self, name):\n if name not in self._extensions:\n raise AttributeError(Errors.E046.format(name=name))\n default, method, getter, setter = self._extensions[name]\n if getter is not None:\n return getter(self._obj)\n elif method is not None:\n return functools.partial(method, self._obj)\n else:\n return self._doc.user_data.get(self._get_key(name), default)\n\n def __setattr__(self, name, value):\n if name not in self._extensions:\n raise AttributeError(Errors.E047.format(name=name))\n default, method, getter, setter = self._extensions[name]\n if setter is not None:\n return setter(self._obj, value)\n else:\n self._doc.user_data[self._get_key(name)] = 
value\n\n def set(self, name, value):\n return self.__setattr__(name, value)\n\n def get(self, name):\n return self.__getattr__(name)\n\n def has(self, name):\n return name in self._extensions\n\n def _get_key(self, name):\n return (\"._.\", name, self._start, self._end)\n\n\ndef get_ext_args(**kwargs):\n \"\"\"Validate and convert arguments. Reused in Doc, Token and Span.\"\"\"\n default = kwargs.get(\"default\")\n getter = kwargs.get(\"getter\")\n setter = kwargs.get(\"setter\")\n method = kwargs.get(\"method\")\n if getter is None and setter is not None:\n raise ValueError(Errors.E089)\n valid_opts = (\"default\" in kwargs, method is not None, getter is not None)\n nr_defined = sum(t is True for t in valid_opts)\n if nr_defined != 1:\n raise ValueError(Errors.E083.format(nr_defined=nr_defined))\n if setter is not None and not hasattr(setter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"setter\", value=repr(setter)))\n if getter is not None and not hasattr(getter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"getter\", value=repr(getter)))\n if method is not None and not hasattr(method, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"method\", value=repr(method)))\n return (default, method, getter, setter)\n\n\ndef is_writable_attr(ext):\n \"\"\"Check if an extension attribute is writable.\n ext (tuple): The (default, getter, setter, method) tuple available via\n {Doc,Span,Token}.get_extension.\n RETURNS (bool): Whether the attribute is writable.\n \"\"\"\n default, method, getter, setter = ext\n # Extension is writable if it has a setter (getter + setter), if it has a\n # default value (or, if its default value is none, none of the other values\n # should be set).\n if setter is not None or default is not None or all(e is None for e in ext):\n return True\n return False\n", "path": "spacy/tokens/underscore.py"}], "after_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport functools\nimport copy\n\nfrom ..errors import Errors\n\n\nclass Underscore(object):\n mutable_types = (dict, list, set)\n doc_extensions = {}\n span_extensions = {}\n token_extensions = {}\n\n def __init__(self, extensions, obj, start=None, end=None):\n object.__setattr__(self, \"_extensions\", extensions)\n object.__setattr__(self, \"_obj\", obj)\n # Assumption is that for doc values, _start and _end will both be None\n # Span will set non-None values for _start and _end\n # Token will have _start be non-None, _end be None\n # This lets us key everything into the doc.user_data dictionary,\n # (see _get_key), and lets us use a single Underscore class.\n object.__setattr__(self, \"_doc\", obj.doc)\n object.__setattr__(self, \"_start\", start)\n object.__setattr__(self, \"_end\", end)\n\n def __getattr__(self, name):\n if name not in self._extensions:\n raise AttributeError(Errors.E046.format(name=name))\n default, method, getter, setter = self._extensions[name]\n if getter is not None:\n return getter(self._obj)\n elif method is not None:\n return functools.partial(method, self._obj)\n else:\n key = self._get_key(name)\n if key in self._doc.user_data:\n return self._doc.user_data[key]\n elif isinstance(default, self.mutable_types):\n # Handle mutable default arguments (see #2581)\n new_default = copy.copy(default)\n self.__setattr__(name, new_default)\n return new_default\n return default\n\n def __setattr__(self, name, value):\n if name not in self._extensions:\n raise AttributeError(Errors.E047.format(name=name))\n default, method, getter, 
setter = self._extensions[name]\n if setter is not None:\n return setter(self._obj, value)\n else:\n self._doc.user_data[self._get_key(name)] = value\n\n def set(self, name, value):\n return self.__setattr__(name, value)\n\n def get(self, name):\n return self.__getattr__(name)\n\n def has(self, name):\n return name in self._extensions\n\n def _get_key(self, name):\n return (\"._.\", name, self._start, self._end)\n\n\ndef get_ext_args(**kwargs):\n \"\"\"Validate and convert arguments. Reused in Doc, Token and Span.\"\"\"\n default = kwargs.get(\"default\")\n getter = kwargs.get(\"getter\")\n setter = kwargs.get(\"setter\")\n method = kwargs.get(\"method\")\n if getter is None and setter is not None:\n raise ValueError(Errors.E089)\n valid_opts = (\"default\" in kwargs, method is not None, getter is not None)\n nr_defined = sum(t is True for t in valid_opts)\n if nr_defined != 1:\n raise ValueError(Errors.E083.format(nr_defined=nr_defined))\n if setter is not None and not hasattr(setter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"setter\", value=repr(setter)))\n if getter is not None and not hasattr(getter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"getter\", value=repr(getter)))\n if method is not None and not hasattr(method, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"method\", value=repr(method)))\n return (default, method, getter, setter)\n\n\ndef is_writable_attr(ext):\n \"\"\"Check if an extension attribute is writable.\n ext (tuple): The (default, getter, setter, method) tuple available via\n {Doc,Span,Token}.get_extension.\n RETURNS (bool): Whether the attribute is writable.\n \"\"\"\n default, method, getter, setter = ext\n # Extension is writable if it has a setter (getter + setter), if it has a\n # default value (or, if its default value is none, none of the other values\n # should be set).\n if setter is not None or default is not None or all(e is None for e in ext):\n return True\n return False\n", "path": "spacy/tokens/underscore.py"}]}
| 1,527 | 264 |
gh_patches_debug_33005
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-378
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v1.6.0 will break if download scripts are added to version.txt in master
We have now done this twice (see #180 and #199).
In v1.6.0 `__init__.py` line 16: `MASTER = True`. This results in the retriever always checking `master` for `version.txt` and discovering scripts that it doesn't know how to handle. In the future, the retriever will handle this gracefully thanks to #204, but it's unclear how we should go about introducing the download only functionality since it will break a number of existing installations.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/MammalSuperTree.py`
Content:
```
1 #retriever
2 from retriever.lib.templates import DownloadOnlyTemplate
3
4 SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
5 shortname='mammsupertree',
6 ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
7 citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
8 description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
9 urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/MammalSuperTree.py b/scripts/MammalSuperTree.py
--- a/scripts/MammalSuperTree.py
+++ b/scripts/MammalSuperTree.py
@@ -1,9 +1,22 @@
#retriever
-from retriever.lib.templates import DownloadOnlyTemplate
-
-SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
- shortname='mammsupertree',
- ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
- citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
- description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
- urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
+from retriever import VERSION
+if (VERSION == 'v1.6') or (VERSION == 'v1.6.0'):
+ #If v1.6 is running use a dummy script to avoid retriever errors
+ #See https://github.com/weecology/retriever/issues/208 for details
+ from retriever.lib.templates import Script
+ class main(Script):
+ def __init(self):
+ Script.__init__(self,
+ name="Mammal Super Tree",
+ shortname='mammsupertree',
+ )
+ SCRIPT = main()
+else:
+ #For all versions other than 1.6 run as normal
+ from retriever.lib.templates import DownloadOnlyTemplate
+ SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
+ shortname='mammsupertree',
+ ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
+ citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
+ description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
+ urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
{"golden_diff": "diff --git a/scripts/MammalSuperTree.py b/scripts/MammalSuperTree.py\n--- a/scripts/MammalSuperTree.py\n+++ b/scripts/MammalSuperTree.py\n@@ -1,9 +1,22 @@\n #retriever\n-from retriever.lib.templates import DownloadOnlyTemplate\n-\n-SCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n- shortname='mammsupertree',\n- ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n- citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x\",\n- description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n- urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n+from retriever import VERSION\n+if (VERSION == 'v1.6') or (VERSION == 'v1.6.0'):\n+ #If v1.6 is running use a dummy script to avoid retriever errors\n+ #See https://github.com/weecology/retriever/issues/208 for details\n+ from retriever.lib.templates import Script\n+ class main(Script):\n+ def __init(self):\n+ Script.__init__(self,\n+ name=\"Mammal Super Tree\",\n+ shortname='mammsupertree',\n+ )\n+ SCRIPT = main()\n+else:\n+ #For all versions other than 1.6 run as normal\n+ from retriever.lib.templates import DownloadOnlyTemplate\n+ SCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n+ shortname='mammsupertree',\n+ ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n+ citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x\",\n+ description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n+ urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n", "issue": "v1.6.0 will break if download scripts are added to version.txt in master\nWe have now done this twice (see #180 and #199).\n\nIn v1.6.0 `__init__.py` line 16: `MASTER = True`. This results in the retriever always checking `master` for `version.txt` and discovering scripts that it doesn't know how to handle. In the future, the retriever will handle this gracefully thanks to #204, but it's unclear how we should go about introducing the download only functionality since it will break a number of existing installations.\n\n", "before_files": [{"content": "#retriever\nfrom retriever.lib.templates import DownloadOnlyTemplate\n\nSCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n shortname='mammsupertree',\n ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. 
doi:10.1111/j.1461-0248.2009.01307.x\",\n description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n", "path": "scripts/MammalSuperTree.py"}], "after_files": [{"content": "#retriever\nfrom retriever import VERSION\nif (VERSION == 'v1.6') or (VERSION == 'v1.6.0'):\n #If v1.6 is running use a dummy script to avoid retriever errors\n #See https://github.com/weecology/retriever/issues/208 for details\n from retriever.lib.templates import Script\n class main(Script):\n def __init(self):\n Script.__init__(self,\n name=\"Mammal Super Tree\",\n shortname='mammsupertree',\n )\n SCRIPT = main()\nelse:\n #For all versions other than 1.6 run as normal\n from retriever.lib.templates import DownloadOnlyTemplate\n SCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n shortname='mammsupertree',\n ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x\",\n description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n", "path": "scripts/MammalSuperTree.py"}]}
| 775 | 959 |
gh_patches_debug_2090
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-2295
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.4 no longer supported?
Hi all,
First of all, thank you all for your great work.
I have noticed that the version bump to 0.5.7 introduced a call to `os.scandir` which is not supported by Python <3.5 afaik. As I am still using Ubuntu 14.04 with Python 3.4 on a few machines, this is a little bit of a headache... I don't know the codebase, but it looks like `xonsh.platform.scandir` could be used instead?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/prompt/vc.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Prompt formatter for simple version control branchs"""
3 # pylint:disable=no-member, invalid-name
4
5 import os
6 import sys
7 import queue
8 import builtins
9 import threading
10 import subprocess
11
12 import xonsh.tools as xt
13
14
15 def _get_git_branch(q):
16 try:
17 branches = xt.decode_bytes(subprocess.check_output(
18 ['git', 'branch'],
19 stderr=subprocess.DEVNULL
20 )).splitlines()
21 except (subprocess.CalledProcessError, OSError, FileNotFoundError):
22 q.put(None)
23 else:
24 for branch in branches:
25 if not branch.startswith('* '):
26 continue
27 elif branch.endswith(')'):
28 branch = branch.split()[-1][:-1]
29 else:
30 branch = branch.split()[-1]
31
32 q.put(branch)
33 break
34 else:
35 q.put(None)
36
37
38 def get_git_branch():
39 """Attempts to find the current git branch. If this could not
40 be determined (timeout, not in a git repo, etc.) then this returns None.
41 """
42 branch = None
43 timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')
44 q = queue.Queue()
45
46 t = threading.Thread(target=_get_git_branch, args=(q,))
47 t.start()
48 t.join(timeout=timeout)
49 try:
50 branch = q.get_nowait()
51 except queue.Empty:
52 branch = None
53 return branch
54
55
56 def _get_hg_root(q):
57 _curpwd = builtins.__xonsh_env__['PWD']
58 while True:
59 if not os.path.isdir(_curpwd):
60 return False
61 if any([b.name == '.hg' for b in os.scandir(_curpwd)]):
62 q.put(_curpwd)
63 break
64 else:
65 _oldpwd = _curpwd
66 _curpwd = os.path.split(_curpwd)[0]
67 if _oldpwd == _curpwd:
68 return False
69
70
71 def get_hg_branch(root=None):
72 """Try to get the mercurial branch of the current directory,
73 return None if not in a repo or subprocess.TimeoutExpired if timed out.
74 """
75 env = builtins.__xonsh_env__
76 timeout = env['VC_BRANCH_TIMEOUT']
77 q = queue.Queue()
78 t = threading.Thread(target=_get_hg_root, args=(q,))
79 t.start()
80 t.join(timeout=timeout)
81 try:
82 root = q.get_nowait()
83 except queue.Empty:
84 return None
85 if env.get('VC_HG_SHOW_BRANCH'):
86 # get branch name
87 branch_path = os.path.sep.join([root, '.hg', 'branch'])
88 if os.path.exists(branch_path):
89 with open(branch_path, 'r') as branch_file:
90 branch = branch_file.read()
91 else:
92 branch = 'default'
93 else:
94 branch = ''
95 # add bookmark, if we can
96 bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])
97 if os.path.exists(bookmark_path):
98 with open(bookmark_path, 'r') as bookmark_file:
99 active_bookmark = bookmark_file.read()
100 if env.get('VC_HG_SHOW_BRANCH') is True:
101 branch = "{0}, {1}".format(*(b.strip(os.linesep) for b in
102 (branch, active_bookmark)))
103 else:
104 branch = active_bookmark.strip(os.linesep)
105 else:
106 branch = branch.strip(os.linesep)
107 return branch
108
109
110 _FIRST_BRANCH_TIMEOUT = True
111
112
113 def _first_branch_timeout_message():
114 global _FIRST_BRANCH_TIMEOUT
115 sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']
116 if not _FIRST_BRANCH_TIMEOUT or sbtm:
117 return
118 _FIRST_BRANCH_TIMEOUT = False
119 print('xonsh: branch timeout: computing the branch name, color, or both '
120 'timed out while formatting the prompt. You may avoid this by '
121 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '
122 'fields, like {curr_branch}, from your $PROMPT. See the FAQ '
123 'for more details. This message will be suppressed for the remainder '
124 'of this session. To suppress this message permanently, set '
125 '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',
126 file=sys.stderr)
127
128
129 def current_branch():
130 """Gets the branch for a current working directory. Returns an empty string
131 if the cwd is not a repository. This currently only works for git and hg
132 and should be extended in the future. If a timeout occurred, the string
133 '<branch-timeout>' is returned.
134 """
135 branch = None
136 cmds = builtins.__xonsh_commands_cache__
137 # check for binary only once
138 if cmds.is_empty():
139 has_git = bool(cmds.locate_binary('git', ignore_alias=True))
140 has_hg = bool(cmds.locate_binary('hg', ignore_alias=True))
141 else:
142 has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True))
143 has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True))
144 if has_git:
145 branch = get_git_branch()
146 if not branch and has_hg:
147 branch = get_hg_branch()
148 if isinstance(branch, subprocess.TimeoutExpired):
149 branch = '<branch-timeout>'
150 _first_branch_timeout_message()
151 return branch or None
152
153
154 def _git_dirty_working_directory(q, include_untracked):
155 status = None
156 try:
157 cmd = ['git', 'status', '--porcelain']
158 if include_untracked:
159 cmd.append('--untracked-files=normal')
160 else:
161 cmd.append('--untracked-files=no')
162 status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
163 except (subprocess.CalledProcessError, OSError, FileNotFoundError):
164 q.put(None)
165 if status is not None:
166 return q.put(bool(status))
167
168
169 def git_dirty_working_directory(include_untracked=False):
170 """Returns whether or not the git directory is dirty. If this could not
171 be determined (timeout, file not found, etc.) then this returns None.
172 """
173 timeout = builtins.__xonsh_env__.get("VC_BRANCH_TIMEOUT")
174 q = queue.Queue()
175 t = threading.Thread(target=_git_dirty_working_directory,
176 args=(q, include_untracked))
177 t.start()
178 t.join(timeout=timeout)
179 try:
180 return q.get_nowait()
181 except queue.Empty:
182 return None
183
184
185 def hg_dirty_working_directory():
186 """Computes whether or not the mercurial working directory is dirty or not.
187 If this cannot be determined, None is returned.
188 """
189 env = builtins.__xonsh_env__
190 cwd = env['PWD']
191 denv = env.detype()
192 vcbt = env['VC_BRANCH_TIMEOUT']
193 # Override user configurations settings and aliases
194 denv['HGRCPATH'] = ''
195 try:
196 s = subprocess.check_output(['hg', 'identify', '--id'],
197 stderr=subprocess.PIPE, cwd=cwd,
198 timeout=vcbt, universal_newlines=True,
199 env=denv)
200 return s.strip(os.linesep).endswith('+')
201 except (subprocess.CalledProcessError, subprocess.TimeoutExpired,
202 FileNotFoundError):
203 return None
204
205
206 def dirty_working_directory():
207 """Returns a boolean as to whether there are uncommitted files in version
208 control repository we are inside. If this cannot be determined, returns
209 None. Currently supports git and hg.
210 """
211 dwd = None
212 cmds = builtins.__xonsh_commands_cache__
213 if cmds.lazy_locate_binary('git'):
214 dwd = git_dirty_working_directory()
215 if cmds.lazy_locate_binary('hg') and dwd is None:
216 dwd = hg_dirty_working_directory()
217 return dwd
218
219
220 def branch_color():
221 """Return red if the current branch is dirty, yellow if the dirtiness can
222 not be determined, and green if it clean. These are bold, intense colors
223 for the foreground.
224 """
225 dwd = dirty_working_directory()
226 if dwd is None:
227 color = '{BOLD_INTENSE_YELLOW}'
228 elif dwd:
229 color = '{BOLD_INTENSE_RED}'
230 else:
231 color = '{BOLD_INTENSE_GREEN}'
232 return color
233
234
235 def branch_bg_color():
236 """Return red if the current branch is dirty, yellow if the dirtiness can
237 not be determined, and green if it clean. These are bacground colors.
238 """
239 dwd = dirty_working_directory()
240 if dwd is None:
241 color = '{BACKGROUND_YELLOW}'
242 elif dwd:
243 color = '{BACKGROUND_RED}'
244 else:
245 color = '{BACKGROUND_GREEN}'
246 return color
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xonsh/prompt/vc.py b/xonsh/prompt/vc.py
--- a/xonsh/prompt/vc.py
+++ b/xonsh/prompt/vc.py
@@ -58,7 +58,7 @@
while True:
if not os.path.isdir(_curpwd):
return False
- if any([b.name == '.hg' for b in os.scandir(_curpwd)]):
+ if any([b.name == '.hg' for b in xt.scandir(_curpwd)]):
q.put(_curpwd)
break
else:
|
{"golden_diff": "diff --git a/xonsh/prompt/vc.py b/xonsh/prompt/vc.py\n--- a/xonsh/prompt/vc.py\n+++ b/xonsh/prompt/vc.py\n@@ -58,7 +58,7 @@\n while True:\n if not os.path.isdir(_curpwd):\n return False\n- if any([b.name == '.hg' for b in os.scandir(_curpwd)]):\n+ if any([b.name == '.hg' for b in xt.scandir(_curpwd)]):\n q.put(_curpwd)\n break\n else:\n", "issue": "Python 3.4 no longer supported?\nHi all,\r\n\r\nFirst of all, thank you all for your great work. \r\n\r\nI have noticed that the version bump to 0.5.7 introduced a call to `os.scandir` which is not supported by Python <3.5 afaik. As I am still using Ubuntu 14.04 with Python 3.4 on a few machines, this is a little bit of a headache... I don't know the codebase, but it looks like `xonsh.platform.scandir` could be used instead?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Prompt formatter for simple version control branchs\"\"\"\n# pylint:disable=no-member, invalid-name\n\nimport os\nimport sys\nimport queue\nimport builtins\nimport threading\nimport subprocess\n\nimport xonsh.tools as xt\n\n\ndef _get_git_branch(q):\n try:\n branches = xt.decode_bytes(subprocess.check_output(\n ['git', 'branch'],\n stderr=subprocess.DEVNULL\n )).splitlines()\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n else:\n for branch in branches:\n if not branch.startswith('* '):\n continue\n elif branch.endswith(')'):\n branch = branch.split()[-1][:-1]\n else:\n branch = branch.split()[-1]\n\n q.put(branch)\n break\n else:\n q.put(None)\n\n\ndef get_git_branch():\n \"\"\"Attempts to find the current git branch. If this could not\n be determined (timeout, not in a git repo, etc.) then this returns None.\n \"\"\"\n branch = None\n timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')\n q = queue.Queue()\n\n t = threading.Thread(target=_get_git_branch, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n branch = q.get_nowait()\n except queue.Empty:\n branch = None\n return branch\n\n\ndef _get_hg_root(q):\n _curpwd = builtins.__xonsh_env__['PWD']\n while True:\n if not os.path.isdir(_curpwd):\n return False\n if any([b.name == '.hg' for b in os.scandir(_curpwd)]):\n q.put(_curpwd)\n break\n else:\n _oldpwd = _curpwd\n _curpwd = os.path.split(_curpwd)[0]\n if _oldpwd == _curpwd:\n return False\n\n\ndef get_hg_branch(root=None):\n \"\"\"Try to get the mercurial branch of the current directory,\n return None if not in a repo or subprocess.TimeoutExpired if timed out.\n \"\"\"\n env = builtins.__xonsh_env__\n timeout = env['VC_BRANCH_TIMEOUT']\n q = queue.Queue()\n t = threading.Thread(target=_get_hg_root, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n root = q.get_nowait()\n except queue.Empty:\n return None\n if env.get('VC_HG_SHOW_BRANCH'):\n # get branch name\n branch_path = os.path.sep.join([root, '.hg', 'branch'])\n if os.path.exists(branch_path):\n with open(branch_path, 'r') as branch_file:\n branch = branch_file.read()\n else:\n branch = 'default'\n else:\n branch = ''\n # add bookmark, if we can\n bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])\n if os.path.exists(bookmark_path):\n with open(bookmark_path, 'r') as bookmark_file:\n active_bookmark = bookmark_file.read()\n if env.get('VC_HG_SHOW_BRANCH') is True:\n branch = \"{0}, {1}\".format(*(b.strip(os.linesep) for b in\n (branch, active_bookmark)))\n else:\n branch = active_bookmark.strip(os.linesep)\n else:\n branch = branch.strip(os.linesep)\n return branch\n\n\n_FIRST_BRANCH_TIMEOUT = True\n\n\ndef 
_first_branch_timeout_message():\n global _FIRST_BRANCH_TIMEOUT\n sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']\n if not _FIRST_BRANCH_TIMEOUT or sbtm:\n return\n _FIRST_BRANCH_TIMEOUT = False\n print('xonsh: branch timeout: computing the branch name, color, or both '\n 'timed out while formatting the prompt. You may avoid this by '\n 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '\n 'fields, like {curr_branch}, from your $PROMPT. See the FAQ '\n 'for more details. This message will be suppressed for the remainder '\n 'of this session. To suppress this message permanently, set '\n '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',\n file=sys.stderr)\n\n\ndef current_branch():\n \"\"\"Gets the branch for a current working directory. Returns an empty string\n if the cwd is not a repository. This currently only works for git and hg\n and should be extended in the future. If a timeout occurred, the string\n '<branch-timeout>' is returned.\n \"\"\"\n branch = None\n cmds = builtins.__xonsh_commands_cache__\n # check for binary only once\n if cmds.is_empty():\n has_git = bool(cmds.locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.locate_binary('hg', ignore_alias=True))\n else:\n has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True))\n if has_git:\n branch = get_git_branch()\n if not branch and has_hg:\n branch = get_hg_branch()\n if isinstance(branch, subprocess.TimeoutExpired):\n branch = '<branch-timeout>'\n _first_branch_timeout_message()\n return branch or None\n\n\ndef _git_dirty_working_directory(q, include_untracked):\n status = None\n try:\n cmd = ['git', 'status', '--porcelain']\n if include_untracked:\n cmd.append('--untracked-files=normal')\n else:\n cmd.append('--untracked-files=no')\n status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n if status is not None:\n return q.put(bool(status))\n\n\ndef git_dirty_working_directory(include_untracked=False):\n \"\"\"Returns whether or not the git directory is dirty. If this could not\n be determined (timeout, file not found, etc.) then this returns None.\n \"\"\"\n timeout = builtins.__xonsh_env__.get(\"VC_BRANCH_TIMEOUT\")\n q = queue.Queue()\n t = threading.Thread(target=_git_dirty_working_directory,\n args=(q, include_untracked))\n t.start()\n t.join(timeout=timeout)\n try:\n return q.get_nowait()\n except queue.Empty:\n return None\n\n\ndef hg_dirty_working_directory():\n \"\"\"Computes whether or not the mercurial working directory is dirty or not.\n If this cannot be determined, None is returned.\n \"\"\"\n env = builtins.__xonsh_env__\n cwd = env['PWD']\n denv = env.detype()\n vcbt = env['VC_BRANCH_TIMEOUT']\n # Override user configurations settings and aliases\n denv['HGRCPATH'] = ''\n try:\n s = subprocess.check_output(['hg', 'identify', '--id'],\n stderr=subprocess.PIPE, cwd=cwd,\n timeout=vcbt, universal_newlines=True,\n env=denv)\n return s.strip(os.linesep).endswith('+')\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired,\n FileNotFoundError):\n return None\n\n\ndef dirty_working_directory():\n \"\"\"Returns a boolean as to whether there are uncommitted files in version\n control repository we are inside. If this cannot be determined, returns\n None. 
Currently supports git and hg.\n \"\"\"\n dwd = None\n cmds = builtins.__xonsh_commands_cache__\n if cmds.lazy_locate_binary('git'):\n dwd = git_dirty_working_directory()\n if cmds.lazy_locate_binary('hg') and dwd is None:\n dwd = hg_dirty_working_directory()\n return dwd\n\n\ndef branch_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. These are bold, intense colors\n for the foreground.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BOLD_INTENSE_YELLOW}'\n elif dwd:\n color = '{BOLD_INTENSE_RED}'\n else:\n color = '{BOLD_INTENSE_GREEN}'\n return color\n\n\ndef branch_bg_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. These are bacground colors.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BACKGROUND_YELLOW}'\n elif dwd:\n color = '{BACKGROUND_RED}'\n else:\n color = '{BACKGROUND_GREEN}'\n return color\n", "path": "xonsh/prompt/vc.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Prompt formatter for simple version control branchs\"\"\"\n# pylint:disable=no-member, invalid-name\n\nimport os\nimport sys\nimport queue\nimport builtins\nimport threading\nimport subprocess\n\nimport xonsh.tools as xt\n\n\ndef _get_git_branch(q):\n try:\n branches = xt.decode_bytes(subprocess.check_output(\n ['git', 'branch'],\n stderr=subprocess.DEVNULL\n )).splitlines()\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n else:\n for branch in branches:\n if not branch.startswith('* '):\n continue\n elif branch.endswith(')'):\n branch = branch.split()[-1][:-1]\n else:\n branch = branch.split()[-1]\n\n q.put(branch)\n break\n else:\n q.put(None)\n\n\ndef get_git_branch():\n \"\"\"Attempts to find the current git branch. If this could not\n be determined (timeout, not in a git repo, etc.) 
then this returns None.\n \"\"\"\n branch = None\n timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')\n q = queue.Queue()\n\n t = threading.Thread(target=_get_git_branch, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n branch = q.get_nowait()\n except queue.Empty:\n branch = None\n return branch\n\n\ndef _get_hg_root(q):\n _curpwd = builtins.__xonsh_env__['PWD']\n while True:\n if not os.path.isdir(_curpwd):\n return False\n if any([b.name == '.hg' for b in xt.scandir(_curpwd)]):\n q.put(_curpwd)\n break\n else:\n _oldpwd = _curpwd\n _curpwd = os.path.split(_curpwd)[0]\n if _oldpwd == _curpwd:\n return False\n\n\ndef get_hg_branch(root=None):\n \"\"\"Try to get the mercurial branch of the current directory,\n return None if not in a repo or subprocess.TimeoutExpired if timed out.\n \"\"\"\n env = builtins.__xonsh_env__\n timeout = env['VC_BRANCH_TIMEOUT']\n q = queue.Queue()\n t = threading.Thread(target=_get_hg_root, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n root = q.get_nowait()\n except queue.Empty:\n return None\n if env.get('VC_HG_SHOW_BRANCH'):\n # get branch name\n branch_path = os.path.sep.join([root, '.hg', 'branch'])\n if os.path.exists(branch_path):\n with open(branch_path, 'r') as branch_file:\n branch = branch_file.read()\n else:\n branch = 'default'\n else:\n branch = ''\n # add bookmark, if we can\n bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])\n if os.path.exists(bookmark_path):\n with open(bookmark_path, 'r') as bookmark_file:\n active_bookmark = bookmark_file.read()\n if env.get('VC_HG_SHOW_BRANCH') is True:\n branch = \"{0}, {1}\".format(*(b.strip(os.linesep) for b in\n (branch, active_bookmark)))\n else:\n branch = active_bookmark.strip(os.linesep)\n else:\n branch = branch.strip(os.linesep)\n return branch\n\n\n_FIRST_BRANCH_TIMEOUT = True\n\n\ndef _first_branch_timeout_message():\n global _FIRST_BRANCH_TIMEOUT\n sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']\n if not _FIRST_BRANCH_TIMEOUT or sbtm:\n return\n _FIRST_BRANCH_TIMEOUT = False\n print('xonsh: branch timeout: computing the branch name, color, or both '\n 'timed out while formatting the prompt. You may avoid this by '\n 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '\n 'fields, like {curr_branch}, from your $PROMPT. See the FAQ '\n 'for more details. This message will be suppressed for the remainder '\n 'of this session. To suppress this message permanently, set '\n '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',\n file=sys.stderr)\n\n\ndef current_branch():\n \"\"\"Gets the branch for a current working directory. Returns an empty string\n if the cwd is not a repository. This currently only works for git and hg\n and should be extended in the future. 
If a timeout occurred, the string\n '<branch-timeout>' is returned.\n \"\"\"\n branch = None\n cmds = builtins.__xonsh_commands_cache__\n # check for binary only once\n if cmds.is_empty():\n has_git = bool(cmds.locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.locate_binary('hg', ignore_alias=True))\n else:\n has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True))\n if has_git:\n branch = get_git_branch()\n if not branch and has_hg:\n branch = get_hg_branch()\n if isinstance(branch, subprocess.TimeoutExpired):\n branch = '<branch-timeout>'\n _first_branch_timeout_message()\n return branch or None\n\n\ndef _git_dirty_working_directory(q, include_untracked):\n status = None\n try:\n cmd = ['git', 'status', '--porcelain']\n if include_untracked:\n cmd.append('--untracked-files=normal')\n else:\n cmd.append('--untracked-files=no')\n status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n if status is not None:\n return q.put(bool(status))\n\n\ndef git_dirty_working_directory(include_untracked=False):\n \"\"\"Returns whether or not the git directory is dirty. If this could not\n be determined (timeout, file not found, etc.) then this returns None.\n \"\"\"\n timeout = builtins.__xonsh_env__.get(\"VC_BRANCH_TIMEOUT\")\n q = queue.Queue()\n t = threading.Thread(target=_git_dirty_working_directory,\n args=(q, include_untracked))\n t.start()\n t.join(timeout=timeout)\n try:\n return q.get_nowait()\n except queue.Empty:\n return None\n\n\ndef hg_dirty_working_directory():\n \"\"\"Computes whether or not the mercurial working directory is dirty or not.\n If this cannot be determined, None is returned.\n \"\"\"\n env = builtins.__xonsh_env__\n cwd = env['PWD']\n denv = env.detype()\n vcbt = env['VC_BRANCH_TIMEOUT']\n # Override user configurations settings and aliases\n denv['HGRCPATH'] = ''\n try:\n s = subprocess.check_output(['hg', 'identify', '--id'],\n stderr=subprocess.PIPE, cwd=cwd,\n timeout=vcbt, universal_newlines=True,\n env=denv)\n return s.strip(os.linesep).endswith('+')\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired,\n FileNotFoundError):\n return None\n\n\ndef dirty_working_directory():\n \"\"\"Returns a boolean as to whether there are uncommitted files in version\n control repository we are inside. If this cannot be determined, returns\n None. Currently supports git and hg.\n \"\"\"\n dwd = None\n cmds = builtins.__xonsh_commands_cache__\n if cmds.lazy_locate_binary('git'):\n dwd = git_dirty_working_directory()\n if cmds.lazy_locate_binary('hg') and dwd is None:\n dwd = hg_dirty_working_directory()\n return dwd\n\n\ndef branch_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. These are bold, intense colors\n for the foreground.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BOLD_INTENSE_YELLOW}'\n elif dwd:\n color = '{BOLD_INTENSE_RED}'\n else:\n color = '{BOLD_INTENSE_GREEN}'\n return color\n\n\ndef branch_bg_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. 
These are bacground colors.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BACKGROUND_YELLOW}'\n elif dwd:\n color = '{BACKGROUND_RED}'\n else:\n color = '{BACKGROUND_GREEN}'\n return color\n", "path": "xonsh/prompt/vc.py"}]}
| 2,899 | 133 |
gh_patches_debug_29387
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-828
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Locales for cs_CZ fake.name_male() shows a female name
Looking at the very last example [here](https://faker.readthedocs.io/en/latest/locales/cs_CZ.html#faker-providers-misc). The name is actually a female name.
```
fake.name_male()
# 'Ing. Sára Mašková CSc.'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/person/cs_CZ/__init__.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as PersonProvider
4
5
6 class Provider(PersonProvider):
7 formats = (
8 '{{first_name_male}} {{last_name_male}}',
9 '{{first_name_male}} {{last_name_male}}',
10 '{{first_name_male}} {{last_name_male}}',
11 '{{first_name_male}} {{last_name_male}}',
12 '{{first_name_male}} {{last_name_male}}',
13 '{{first_name_female}} {{last_name_female}}',
14 '{{first_name_female}} {{last_name_female}}',
15 '{{first_name_female}} {{last_name_female}}',
16 '{{first_name_female}} {{last_name_female}}',
17 '{{first_name_female}} {{last_name_female}}',
18 '{{prefix_male}} {{first_name_male}} {{last_name_male}}',
19 '{{prefix_female}} {{first_name_female}} {{last_name_female}}',
20 '{{first_name_male}} {{last_name_male}} {{suffix}}',
21 '{{first_name_female}} {{last_name_female}} {{suffix}}',
22 '{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}',
23 '{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}')
24
25 first_names_male = (
26 'Adam',
27 'Alexander',
28 'Alexandr',
29 'Aleš',
30 'Alois',
31 'Antonín',
32 'Arnošt',
33 'Bedřich',
34 'Bohumil',
35 'Bohumír',
36 'Bohuslav',
37 'Břetislav',
38 'Dalibor',
39 'Daniel',
40 'David',
41 'Denis',
42 'Dominik',
43 'Dušan',
44 'Eduard',
45 'Emil',
46 'Erik',
47 'Filip',
48 'František',
49 'Hynek',
50 'Igor',
51 'Ivan',
52 'Ivo',
53 'Jakub',
54 'Jan',
55 'Jaromír',
56 'Jaroslav',
57 'Jindřich',
58 'Jiří',
59 'Josef',
60 'Jozef',
61 'Ján',
62 'Kamil',
63 'Karel',
64 'Kryštof',
65 'Ladislav',
66 'Leoš',
67 'Libor',
68 'Lubomír',
69 'Luboš',
70 'Ludvík',
71 'Luděk',
72 'Lukáš',
73 'Marcel',
74 'Marek',
75 'Marian',
76 'Martin',
77 'Matyáš',
78 'Matěj',
79 'Michael',
80 'Michal',
81 'Milan',
82 'Miloslav',
83 'Miloš',
84 'Miroslav',
85 'Oldřich',
86 'Ondřej',
87 'Otakar',
88 'Patrik',
89 'Pavel',
90 'Peter',
91 'Petr',
92 'Přemysl',
93 'Radek',
94 'Radim',
95 'Radomír',
96 'Radovan',
97 'René',
98 'Richard',
99 'Robert',
100 'Robin',
101 'Roman',
102 'Rostislav',
103 'Rudolf',
104 'Samuel',
105 'Stanislav',
106 'Tadeáš',
107 'Tomáš',
108 'Vasyl',
109 'Viktor',
110 'Vilém',
111 'Vladimír',
112 'Vladislav',
113 'Vlastimil',
114 'Vojtěch',
115 'Vratislav',
116 'Václav',
117 'Vít',
118 'Vítězslav',
119 'Zbyněk',
120 'Zdeněk',
121 'Šimon',
122 'Štefan',
123 'Štěpán')
124
125 first_names_female = (
126 'Adéla',
127 'Alena',
128 'Alexandra',
129 'Alice',
130 'Alžběta',
131 'Andrea',
132 'Aneta',
133 'Anežka',
134 'Anna',
135 'Barbora',
136 'Blanka',
137 'Blažena',
138 'Bohumila',
139 'Božena',
140 'Dagmar',
141 'Dana',
142 'Daniela',
143 'Danuše',
144 'Denisa',
145 'Dominika',
146 'Drahomíra',
147 'Eliška',
148 'Emilie',
149 'Eva',
150 'Františka',
151 'Gabriela',
152 'Hana',
153 'Helena',
154 'Ilona',
155 'Irena',
156 'Iva',
157 'Ivana',
158 'Iveta',
159 'Jana',
160 'Jarmila',
161 'Jaroslava',
162 'Jindřiška',
163 'Jitka',
164 'Jiřina',
165 'Julie',
166 'Kamila',
167 'Karolína',
168 'Kateřina',
169 'Klára',
170 'Kristina',
171 'Kristýna',
172 'Květa',
173 'Květoslava',
174 'Ladislava',
175 'Lenka',
176 'Libuše',
177 'Lucie',
178 'Ludmila',
179 'Magdalena',
180 'Magdaléna',
181 'Marcela',
182 'Marie',
183 'Markéta',
184 'Marta',
185 'Martina',
186 'Michaela',
187 'Milada',
188 'Milena',
189 'Miloslava',
190 'Miluše',
191 'Miroslava',
192 'Monika',
193 'Mária',
194 'Naděžda',
195 'Natálie',
196 'Nela',
197 'Nikol',
198 'Nikola',
199 'Olga',
200 'Pavla',
201 'Pavlína',
202 'Petra',
203 'Radka',
204 'Renata',
205 'Renáta',
206 'Romana',
207 'Růžena',
208 'Sabina',
209 'Simona',
210 'Soňa',
211 'Stanislava',
212 'Sára',
213 'Tereza',
214 'Vendula',
215 'Veronika',
216 'Viktorie',
217 'Vladimíra',
218 'Vlasta',
219 'Věra',
220 'Zdenka',
221 'Zdeňka',
222 'Zuzana',
223 'Štěpánka',
224 'Šárka',
225 'Žaneta')
226
227 first_names = first_names_male + first_names_female
228
229 last_names_male = (
230 'Bartoš',
231 'Beneš',
232 'Blažek',
233 'Bláha',
234 'Doležal',
235 'Dušek',
236 'Dvořák',
237 'Fiala',
238 'Holub',
239 'Horák',
240 'Hájek',
241 'Jelínek',
242 'Kadlec',
243 'Kolář',
244 'Kopecký',
245 'Kratochvíl',
246 'Krejčí',
247 'Král',
248 'Kučera',
249 'Kříž',
250 'Malý',
251 'Marek',
252 'Mareš',
253 'Mašek',
254 'Moravec',
255 'Novotný',
256 'Novák',
257 'Němec',
258 'Pokorný',
259 'Polák',
260 'Pospíšil',
261 'Procházka',
262 'Růžička',
263 'Sedláček',
264 'Soukup',
265 'Svoboda',
266 'Urban',
267 'Vaněk',
268 'Veselý',
269 'Vlček',
270 'Zeman',
271 'Čermák',
272 'Černý',
273 'Říha',
274 'Šimek',
275 'Štěpánek',
276 'Šťastný')
277
278 last_names_female = (
279 'Bartošová',
280 'Benešová',
281 'Beranová',
282 'Blažková',
283 'Bláhová',
284 'Doležalová',
285 'Dušková',
286 'Dvořáková',
287 'Fialová',
288 'Holubová',
289 'Horáková',
290 'Hájková',
291 'Jandová',
292 'Jelínková',
293 'Kadlecová',
294 'Kolářová',
295 'Kopecká',
296 'Kratochvílová',
297 'Krejčová',
298 'Králová',
299 'Kučerová',
300 'Křížová',
301 'Machová',
302 'Malá',
303 'Marešová',
304 'Marková',
305 'Mašková',
306 'Moravcová',
307 'Novotná',
308 'Nováková',
309 'Němcová',
310 'Pokorná',
311 'Poláková',
312 'Pospíšilová',
313 'Procházková',
314 'Růžičková',
315 'Sedláčková',
316 'Soukupová',
317 'Svobodová',
318 'Tichá',
319 'Urbanová',
320 'Vacková',
321 'Vaňková',
322 'Veselá',
323 'Vlčková',
324 'Vávrová',
325 'Zemanová',
326 'Čermáková',
327 'Černá',
328 'Říhová',
329 'Šimková',
330 'Štěpánková',
331 'Šťastná')
332
333 last_names = last_names_male + last_names_female
334
335 degrees = ('JUDr.', 'Ing.', 'Bc.', 'Mgr.', 'MUDr.', 'RNDr.')
336
337 prefixes_male = ('pan', ) + degrees
338
339 prefixes_female = ('paní', 'slečna', ) + degrees
340
341 suffixes = ('CSc.', 'DiS.', 'Ph.D.', 'Th.D.')
342
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/person/cs_CZ/__init__.py b/faker/providers/person/cs_CZ/__init__.py
--- a/faker/providers/person/cs_CZ/__init__.py
+++ b/faker/providers/person/cs_CZ/__init__.py
@@ -1,26 +1,26 @@
# coding=utf-8
from __future__ import unicode_literals
+from collections import OrderedDict
from .. import Provider as PersonProvider
class Provider(PersonProvider):
- formats = (
- '{{first_name_male}} {{last_name_male}}',
- '{{first_name_male}} {{last_name_male}}',
- '{{first_name_male}} {{last_name_male}}',
- '{{first_name_male}} {{last_name_male}}',
- '{{first_name_male}} {{last_name_male}}',
- '{{first_name_female}} {{last_name_female}}',
- '{{first_name_female}} {{last_name_female}}',
- '{{first_name_female}} {{last_name_female}}',
- '{{first_name_female}} {{last_name_female}}',
- '{{first_name_female}} {{last_name_female}}',
- '{{prefix_male}} {{first_name_male}} {{last_name_male}}',
- '{{prefix_female}} {{first_name_female}} {{last_name_female}}',
- '{{first_name_male}} {{last_name_male}} {{suffix}}',
- '{{first_name_female}} {{last_name_female}} {{suffix}}',
- '{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}',
- '{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}')
+ formats_female = OrderedDict((
+ ('{{first_name_female}} {{last_name_female}}', 0.97),
+ ('{{prefix_female}} {{first_name_female}} {{last_name_female}}', 0.015),
+ ('{{first_name_female}} {{last_name_female}} {{suffix}}', 0.02),
+ ('{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}', 0.005)
+ ))
+
+ formats_male = OrderedDict((
+ ('{{first_name_male}} {{last_name_male}}', 0.97),
+ ('{{prefix_male}} {{first_name_male}} {{last_name_male}}', 0.015),
+ ('{{first_name_male}} {{last_name_male}} {{suffix}}', 0.02),
+ ('{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}', 0.005)
+ ))
+
+ formats = formats_male.copy()
+ formats.update(formats_female)
first_names_male = (
'Adam',
|
{"golden_diff": "diff --git a/faker/providers/person/cs_CZ/__init__.py b/faker/providers/person/cs_CZ/__init__.py\n--- a/faker/providers/person/cs_CZ/__init__.py\n+++ b/faker/providers/person/cs_CZ/__init__.py\n@@ -1,26 +1,26 @@\n # coding=utf-8\n from __future__ import unicode_literals\n+from collections import OrderedDict\n from .. import Provider as PersonProvider\n \n \n class Provider(PersonProvider):\n- formats = (\n- '{{first_name_male}} {{last_name_male}}',\n- '{{first_name_male}} {{last_name_male}}',\n- '{{first_name_male}} {{last_name_male}}',\n- '{{first_name_male}} {{last_name_male}}',\n- '{{first_name_male}} {{last_name_male}}',\n- '{{first_name_female}} {{last_name_female}}',\n- '{{first_name_female}} {{last_name_female}}',\n- '{{first_name_female}} {{last_name_female}}',\n- '{{first_name_female}} {{last_name_female}}',\n- '{{first_name_female}} {{last_name_female}}',\n- '{{prefix_male}} {{first_name_male}} {{last_name_male}}',\n- '{{prefix_female}} {{first_name_female}} {{last_name_female}}',\n- '{{first_name_male}} {{last_name_male}} {{suffix}}',\n- '{{first_name_female}} {{last_name_female}} {{suffix}}',\n- '{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}',\n- '{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}')\n+ formats_female = OrderedDict((\n+ ('{{first_name_female}} {{last_name_female}}', 0.97),\n+ ('{{prefix_female}} {{first_name_female}} {{last_name_female}}', 0.015),\n+ ('{{first_name_female}} {{last_name_female}} {{suffix}}', 0.02),\n+ ('{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}', 0.005)\n+ ))\n+\n+ formats_male = OrderedDict((\n+ ('{{first_name_male}} {{last_name_male}}', 0.97),\n+ ('{{prefix_male}} {{first_name_male}} {{last_name_male}}', 0.015),\n+ ('{{first_name_male}} {{last_name_male}} {{suffix}}', 0.02),\n+ ('{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}', 0.005)\n+ ))\n+\n+ formats = formats_male.copy()\n+ formats.update(formats_female)\n \n first_names_male = (\n 'Adam',\n", "issue": "Locales for cs_CZ fake.name_male() shows a female name\nLooking at the very last example [here](https://faker.readthedocs.io/en/latest/locales/cs_CZ.html#faker-providers-misc). The name is actually a female name.\r\n\r\n```\r\nfake.name_male()\r\n# 'Ing. S\u00e1ra Ma\u0161kov\u00e1 CSc.'\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. 
import Provider as PersonProvider\n\n\nclass Provider(PersonProvider):\n formats = (\n '{{first_name_male}} {{last_name_male}}',\n '{{first_name_male}} {{last_name_male}}',\n '{{first_name_male}} {{last_name_male}}',\n '{{first_name_male}} {{last_name_male}}',\n '{{first_name_male}} {{last_name_male}}',\n '{{first_name_female}} {{last_name_female}}',\n '{{first_name_female}} {{last_name_female}}',\n '{{first_name_female}} {{last_name_female}}',\n '{{first_name_female}} {{last_name_female}}',\n '{{first_name_female}} {{last_name_female}}',\n '{{prefix_male}} {{first_name_male}} {{last_name_male}}',\n '{{prefix_female}} {{first_name_female}} {{last_name_female}}',\n '{{first_name_male}} {{last_name_male}} {{suffix}}',\n '{{first_name_female}} {{last_name_female}} {{suffix}}',\n '{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}',\n '{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}')\n\n first_names_male = (\n 'Adam',\n 'Alexander',\n 'Alexandr',\n 'Ale\u0161',\n 'Alois',\n 'Anton\u00edn',\n 'Arno\u0161t',\n 'Bed\u0159ich',\n 'Bohumil',\n 'Bohum\u00edr',\n 'Bohuslav',\n 'B\u0159etislav',\n 'Dalibor',\n 'Daniel',\n 'David',\n 'Denis',\n 'Dominik',\n 'Du\u0161an',\n 'Eduard',\n 'Emil',\n 'Erik',\n 'Filip',\n 'Franti\u0161ek',\n 'Hynek',\n 'Igor',\n 'Ivan',\n 'Ivo',\n 'Jakub',\n 'Jan',\n 'Jarom\u00edr',\n 'Jaroslav',\n 'Jind\u0159ich',\n 'Ji\u0159\u00ed',\n 'Josef',\n 'Jozef',\n 'J\u00e1n',\n 'Kamil',\n 'Karel',\n 'Kry\u0161tof',\n 'Ladislav',\n 'Leo\u0161',\n 'Libor',\n 'Lubom\u00edr',\n 'Lubo\u0161',\n 'Ludv\u00edk',\n 'Lud\u011bk',\n 'Luk\u00e1\u0161',\n 'Marcel',\n 'Marek',\n 'Marian',\n 'Martin',\n 'Maty\u00e1\u0161',\n 'Mat\u011bj',\n 'Michael',\n 'Michal',\n 'Milan',\n 'Miloslav',\n 'Milo\u0161',\n 'Miroslav',\n 'Old\u0159ich',\n 'Ond\u0159ej',\n 'Otakar',\n 'Patrik',\n 'Pavel',\n 'Peter',\n 'Petr',\n 'P\u0159emysl',\n 'Radek',\n 'Radim',\n 'Radom\u00edr',\n 'Radovan',\n 'Ren\u00e9',\n 'Richard',\n 'Robert',\n 'Robin',\n 'Roman',\n 'Rostislav',\n 'Rudolf',\n 'Samuel',\n 'Stanislav',\n 'Tade\u00e1\u0161',\n 'Tom\u00e1\u0161',\n 'Vasyl',\n 'Viktor',\n 'Vil\u00e9m',\n 'Vladim\u00edr',\n 'Vladislav',\n 'Vlastimil',\n 'Vojt\u011bch',\n 'Vratislav',\n 'V\u00e1clav',\n 'V\u00edt',\n 'V\u00edt\u011bzslav',\n 'Zbyn\u011bk',\n 'Zden\u011bk',\n '\u0160imon',\n '\u0160tefan',\n '\u0160t\u011bp\u00e1n')\n\n first_names_female = (\n 'Ad\u00e9la',\n 'Alena',\n 'Alexandra',\n 'Alice',\n 'Al\u017eb\u011bta',\n 'Andrea',\n 'Aneta',\n 'Ane\u017eka',\n 'Anna',\n 'Barbora',\n 'Blanka',\n 'Bla\u017eena',\n 'Bohumila',\n 'Bo\u017eena',\n 'Dagmar',\n 'Dana',\n 'Daniela',\n 'Danu\u0161e',\n 'Denisa',\n 'Dominika',\n 'Drahom\u00edra',\n 'Eli\u0161ka',\n 'Emilie',\n 'Eva',\n 'Franti\u0161ka',\n 'Gabriela',\n 'Hana',\n 'Helena',\n 'Ilona',\n 'Irena',\n 'Iva',\n 'Ivana',\n 'Iveta',\n 'Jana',\n 'Jarmila',\n 'Jaroslava',\n 'Jind\u0159i\u0161ka',\n 'Jitka',\n 'Ji\u0159ina',\n 'Julie',\n 'Kamila',\n 'Karol\u00edna',\n 'Kate\u0159ina',\n 'Kl\u00e1ra',\n 'Kristina',\n 'Krist\u00fdna',\n 'Kv\u011bta',\n 'Kv\u011btoslava',\n 'Ladislava',\n 'Lenka',\n 'Libu\u0161e',\n 'Lucie',\n 'Ludmila',\n 'Magdalena',\n 'Magdal\u00e9na',\n 'Marcela',\n 'Marie',\n 'Mark\u00e9ta',\n 'Marta',\n 'Martina',\n 'Michaela',\n 'Milada',\n 'Milena',\n 'Miloslava',\n 'Milu\u0161e',\n 'Miroslava',\n 'Monika',\n 'M\u00e1ria',\n 'Nad\u011b\u017eda',\n 'Nat\u00e1lie',\n 'Nela',\n 'Nikol',\n 'Nikola',\n 'Olga',\n 'Pavla',\n 'Pavl\u00edna',\n 'Petra',\n 'Radka',\n 'Renata',\n 'Ren\u00e1ta',\n 'Romana',\n 
'R\u016f\u017eena',\n 'Sabina',\n 'Simona',\n 'So\u0148a',\n 'Stanislava',\n 'S\u00e1ra',\n 'Tereza',\n 'Vendula',\n 'Veronika',\n 'Viktorie',\n 'Vladim\u00edra',\n 'Vlasta',\n 'V\u011bra',\n 'Zdenka',\n 'Zde\u0148ka',\n 'Zuzana',\n '\u0160t\u011bp\u00e1nka',\n '\u0160\u00e1rka',\n '\u017daneta')\n\n first_names = first_names_male + first_names_female\n\n last_names_male = (\n 'Barto\u0161',\n 'Bene\u0161',\n 'Bla\u017eek',\n 'Bl\u00e1ha',\n 'Dole\u017eal',\n 'Du\u0161ek',\n 'Dvo\u0159\u00e1k',\n 'Fiala',\n 'Holub',\n 'Hor\u00e1k',\n 'H\u00e1jek',\n 'Jel\u00ednek',\n 'Kadlec',\n 'Kol\u00e1\u0159',\n 'Kopeck\u00fd',\n 'Kratochv\u00edl',\n 'Krej\u010d\u00ed',\n 'Kr\u00e1l',\n 'Ku\u010dera',\n 'K\u0159\u00ed\u017e',\n 'Mal\u00fd',\n 'Marek',\n 'Mare\u0161',\n 'Ma\u0161ek',\n 'Moravec',\n 'Novotn\u00fd',\n 'Nov\u00e1k',\n 'N\u011bmec',\n 'Pokorn\u00fd',\n 'Pol\u00e1k',\n 'Posp\u00ed\u0161il',\n 'Proch\u00e1zka',\n 'R\u016f\u017ei\u010dka',\n 'Sedl\u00e1\u010dek',\n 'Soukup',\n 'Svoboda',\n 'Urban',\n 'Van\u011bk',\n 'Vesel\u00fd',\n 'Vl\u010dek',\n 'Zeman',\n '\u010cerm\u00e1k',\n '\u010cern\u00fd',\n '\u0158\u00edha',\n '\u0160imek',\n '\u0160t\u011bp\u00e1nek',\n '\u0160\u0165astn\u00fd')\n\n last_names_female = (\n 'Barto\u0161ov\u00e1',\n 'Bene\u0161ov\u00e1',\n 'Beranov\u00e1',\n 'Bla\u017ekov\u00e1',\n 'Bl\u00e1hov\u00e1',\n 'Dole\u017ealov\u00e1',\n 'Du\u0161kov\u00e1',\n 'Dvo\u0159\u00e1kov\u00e1',\n 'Fialov\u00e1',\n 'Holubov\u00e1',\n 'Hor\u00e1kov\u00e1',\n 'H\u00e1jkov\u00e1',\n 'Jandov\u00e1',\n 'Jel\u00ednkov\u00e1',\n 'Kadlecov\u00e1',\n 'Kol\u00e1\u0159ov\u00e1',\n 'Kopeck\u00e1',\n 'Kratochv\u00edlov\u00e1',\n 'Krej\u010dov\u00e1',\n 'Kr\u00e1lov\u00e1',\n 'Ku\u010derov\u00e1',\n 'K\u0159\u00ed\u017eov\u00e1',\n 'Machov\u00e1',\n 'Mal\u00e1',\n 'Mare\u0161ov\u00e1',\n 'Markov\u00e1',\n 'Ma\u0161kov\u00e1',\n 'Moravcov\u00e1',\n 'Novotn\u00e1',\n 'Nov\u00e1kov\u00e1',\n 'N\u011bmcov\u00e1',\n 'Pokorn\u00e1',\n 'Pol\u00e1kov\u00e1',\n 'Posp\u00ed\u0161ilov\u00e1',\n 'Proch\u00e1zkov\u00e1',\n 'R\u016f\u017ei\u010dkov\u00e1',\n 'Sedl\u00e1\u010dkov\u00e1',\n 'Soukupov\u00e1',\n 'Svobodov\u00e1',\n 'Tich\u00e1',\n 'Urbanov\u00e1',\n 'Vackov\u00e1',\n 'Va\u0148kov\u00e1',\n 'Vesel\u00e1',\n 'Vl\u010dkov\u00e1',\n 'V\u00e1vrov\u00e1',\n 'Zemanov\u00e1',\n '\u010cerm\u00e1kov\u00e1',\n '\u010cern\u00e1',\n '\u0158\u00edhov\u00e1',\n '\u0160imkov\u00e1',\n '\u0160t\u011bp\u00e1nkov\u00e1',\n '\u0160\u0165astn\u00e1')\n\n last_names = last_names_male + last_names_female\n\n degrees = ('JUDr.', 'Ing.', 'Bc.', 'Mgr.', 'MUDr.', 'RNDr.')\n\n prefixes_male = ('pan', ) + degrees\n\n prefixes_female = ('pan\u00ed', 'sle\u010dna', ) + degrees\n\n suffixes = ('CSc.', 'DiS.', 'Ph.D.', 'Th.D.')\n", "path": "faker/providers/person/cs_CZ/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nfrom .. 
import Provider as PersonProvider\n\n\nclass Provider(PersonProvider):\n formats_female = OrderedDict((\n ('{{first_name_female}} {{last_name_female}}', 0.97),\n ('{{prefix_female}} {{first_name_female}} {{last_name_female}}', 0.015),\n ('{{first_name_female}} {{last_name_female}} {{suffix}}', 0.02),\n ('{{prefix_female}} {{first_name_female}} {{last_name_female}} {{suffix}}', 0.005)\n ))\n\n formats_male = OrderedDict((\n ('{{first_name_male}} {{last_name_male}}', 0.97),\n ('{{prefix_male}} {{first_name_male}} {{last_name_male}}', 0.015),\n ('{{first_name_male}} {{last_name_male}} {{suffix}}', 0.02),\n ('{{prefix_male}} {{first_name_male}} {{last_name_male}} {{suffix}}', 0.005)\n ))\n\n formats = formats_male.copy()\n formats.update(formats_female)\n\n first_names_male = (\n 'Adam',\n 'Alexander',\n 'Alexandr',\n 'Ale\u0161',\n 'Alois',\n 'Anton\u00edn',\n 'Arno\u0161t',\n 'Bed\u0159ich',\n 'Bohumil',\n 'Bohum\u00edr',\n 'Bohuslav',\n 'B\u0159etislav',\n 'Dalibor',\n 'Daniel',\n 'David',\n 'Denis',\n 'Dominik',\n 'Du\u0161an',\n 'Eduard',\n 'Emil',\n 'Erik',\n 'Filip',\n 'Franti\u0161ek',\n 'Hynek',\n 'Igor',\n 'Ivan',\n 'Ivo',\n 'Jakub',\n 'Jan',\n 'Jarom\u00edr',\n 'Jaroslav',\n 'Jind\u0159ich',\n 'Ji\u0159\u00ed',\n 'Josef',\n 'Jozef',\n 'J\u00e1n',\n 'Kamil',\n 'Karel',\n 'Kry\u0161tof',\n 'Ladislav',\n 'Leo\u0161',\n 'Libor',\n 'Lubom\u00edr',\n 'Lubo\u0161',\n 'Ludv\u00edk',\n 'Lud\u011bk',\n 'Luk\u00e1\u0161',\n 'Marcel',\n 'Marek',\n 'Marian',\n 'Martin',\n 'Maty\u00e1\u0161',\n 'Mat\u011bj',\n 'Michael',\n 'Michal',\n 'Milan',\n 'Miloslav',\n 'Milo\u0161',\n 'Miroslav',\n 'Old\u0159ich',\n 'Ond\u0159ej',\n 'Otakar',\n 'Patrik',\n 'Pavel',\n 'Peter',\n 'Petr',\n 'P\u0159emysl',\n 'Radek',\n 'Radim',\n 'Radom\u00edr',\n 'Radovan',\n 'Ren\u00e9',\n 'Richard',\n 'Robert',\n 'Robin',\n 'Roman',\n 'Rostislav',\n 'Rudolf',\n 'Samuel',\n 'Stanislav',\n 'Tade\u00e1\u0161',\n 'Tom\u00e1\u0161',\n 'Vasyl',\n 'Viktor',\n 'Vil\u00e9m',\n 'Vladim\u00edr',\n 'Vladislav',\n 'Vlastimil',\n 'Vojt\u011bch',\n 'Vratislav',\n 'V\u00e1clav',\n 'V\u00edt',\n 'V\u00edt\u011bzslav',\n 'Zbyn\u011bk',\n 'Zden\u011bk',\n '\u0160imon',\n '\u0160tefan',\n '\u0160t\u011bp\u00e1n')\n\n first_names_female = (\n 'Ad\u00e9la',\n 'Alena',\n 'Alexandra',\n 'Alice',\n 'Al\u017eb\u011bta',\n 'Andrea',\n 'Aneta',\n 'Ane\u017eka',\n 'Anna',\n 'Barbora',\n 'Blanka',\n 'Bla\u017eena',\n 'Bohumila',\n 'Bo\u017eena',\n 'Dagmar',\n 'Dana',\n 'Daniela',\n 'Danu\u0161e',\n 'Denisa',\n 'Dominika',\n 'Drahom\u00edra',\n 'Eli\u0161ka',\n 'Emilie',\n 'Eva',\n 'Franti\u0161ka',\n 'Gabriela',\n 'Hana',\n 'Helena',\n 'Ilona',\n 'Irena',\n 'Iva',\n 'Ivana',\n 'Iveta',\n 'Jana',\n 'Jarmila',\n 'Jaroslava',\n 'Jind\u0159i\u0161ka',\n 'Jitka',\n 'Ji\u0159ina',\n 'Julie',\n 'Kamila',\n 'Karol\u00edna',\n 'Kate\u0159ina',\n 'Kl\u00e1ra',\n 'Kristina',\n 'Krist\u00fdna',\n 'Kv\u011bta',\n 'Kv\u011btoslava',\n 'Ladislava',\n 'Lenka',\n 'Libu\u0161e',\n 'Lucie',\n 'Ludmila',\n 'Magdalena',\n 'Magdal\u00e9na',\n 'Marcela',\n 'Marie',\n 'Mark\u00e9ta',\n 'Marta',\n 'Martina',\n 'Michaela',\n 'Milada',\n 'Milena',\n 'Miloslava',\n 'Milu\u0161e',\n 'Miroslava',\n 'Monika',\n 'M\u00e1ria',\n 'Nad\u011b\u017eda',\n 'Nat\u00e1lie',\n 'Nela',\n 'Nikol',\n 'Nikola',\n 'Olga',\n 'Pavla',\n 'Pavl\u00edna',\n 'Petra',\n 'Radka',\n 'Renata',\n 'Ren\u00e1ta',\n 'Romana',\n 'R\u016f\u017eena',\n 'Sabina',\n 'Simona',\n 'So\u0148a',\n 'Stanislava',\n 'S\u00e1ra',\n 'Tereza',\n 'Vendula',\n 'Veronika',\n 'Viktorie',\n 'Vladim\u00edra',\n 'Vlasta',\n 
'V\u011bra',\n 'Zdenka',\n 'Zde\u0148ka',\n 'Zuzana',\n '\u0160t\u011bp\u00e1nka',\n '\u0160\u00e1rka',\n '\u017daneta')\n\n first_names = first_names_male + first_names_female\n\n last_names_male = (\n 'Barto\u0161',\n 'Bene\u0161',\n 'Bla\u017eek',\n 'Bl\u00e1ha',\n 'Dole\u017eal',\n 'Du\u0161ek',\n 'Dvo\u0159\u00e1k',\n 'Fiala',\n 'Holub',\n 'Hor\u00e1k',\n 'H\u00e1jek',\n 'Jel\u00ednek',\n 'Kadlec',\n 'Kol\u00e1\u0159',\n 'Kopeck\u00fd',\n 'Kratochv\u00edl',\n 'Krej\u010d\u00ed',\n 'Kr\u00e1l',\n 'Ku\u010dera',\n 'K\u0159\u00ed\u017e',\n 'Mal\u00fd',\n 'Marek',\n 'Mare\u0161',\n 'Ma\u0161ek',\n 'Moravec',\n 'Novotn\u00fd',\n 'Nov\u00e1k',\n 'N\u011bmec',\n 'Pokorn\u00fd',\n 'Pol\u00e1k',\n 'Posp\u00ed\u0161il',\n 'Proch\u00e1zka',\n 'R\u016f\u017ei\u010dka',\n 'Sedl\u00e1\u010dek',\n 'Soukup',\n 'Svoboda',\n 'Urban',\n 'Van\u011bk',\n 'Vesel\u00fd',\n 'Vl\u010dek',\n 'Zeman',\n '\u010cerm\u00e1k',\n '\u010cern\u00fd',\n '\u0158\u00edha',\n '\u0160imek',\n '\u0160t\u011bp\u00e1nek',\n '\u0160\u0165astn\u00fd')\n\n last_names_female = (\n 'Barto\u0161ov\u00e1',\n 'Bene\u0161ov\u00e1',\n 'Beranov\u00e1',\n 'Bla\u017ekov\u00e1',\n 'Bl\u00e1hov\u00e1',\n 'Dole\u017ealov\u00e1',\n 'Du\u0161kov\u00e1',\n 'Dvo\u0159\u00e1kov\u00e1',\n 'Fialov\u00e1',\n 'Holubov\u00e1',\n 'Hor\u00e1kov\u00e1',\n 'H\u00e1jkov\u00e1',\n 'Jandov\u00e1',\n 'Jel\u00ednkov\u00e1',\n 'Kadlecov\u00e1',\n 'Kol\u00e1\u0159ov\u00e1',\n 'Kopeck\u00e1',\n 'Kratochv\u00edlov\u00e1',\n 'Krej\u010dov\u00e1',\n 'Kr\u00e1lov\u00e1',\n 'Ku\u010derov\u00e1',\n 'K\u0159\u00ed\u017eov\u00e1',\n 'Machov\u00e1',\n 'Mal\u00e1',\n 'Mare\u0161ov\u00e1',\n 'Markov\u00e1',\n 'Ma\u0161kov\u00e1',\n 'Moravcov\u00e1',\n 'Novotn\u00e1',\n 'Nov\u00e1kov\u00e1',\n 'N\u011bmcov\u00e1',\n 'Pokorn\u00e1',\n 'Pol\u00e1kov\u00e1',\n 'Posp\u00ed\u0161ilov\u00e1',\n 'Proch\u00e1zkov\u00e1',\n 'R\u016f\u017ei\u010dkov\u00e1',\n 'Sedl\u00e1\u010dkov\u00e1',\n 'Soukupov\u00e1',\n 'Svobodov\u00e1',\n 'Tich\u00e1',\n 'Urbanov\u00e1',\n 'Vackov\u00e1',\n 'Va\u0148kov\u00e1',\n 'Vesel\u00e1',\n 'Vl\u010dkov\u00e1',\n 'V\u00e1vrov\u00e1',\n 'Zemanov\u00e1',\n '\u010cerm\u00e1kov\u00e1',\n '\u010cern\u00e1',\n '\u0158\u00edhov\u00e1',\n '\u0160imkov\u00e1',\n '\u0160t\u011bp\u00e1nkov\u00e1',\n '\u0160\u0165astn\u00e1')\n\n last_names = last_names_male + last_names_female\n\n degrees = ('JUDr.', 'Ing.', 'Bc.', 'Mgr.', 'MUDr.', 'RNDr.')\n\n prefixes_male = ('pan', ) + degrees\n\n prefixes_female = ('pan\u00ed', 'sle\u010dna', ) + degrees\n\n suffixes = ('CSc.', 'DiS.', 'Ph.D.', 'Th.D.')\n", "path": "faker/providers/person/cs_CZ/__init__.py"}]}
| 3,518 | 584 |
gh_patches_debug_5038
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1105
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyhf contrib download fails gracelessly with invalid URL
# Description
calling `pyhf contrib download` (just to see what it does) fails pretty violently. Should we make it a bit nicer @matthewfeickert
?
```
pyhf contrib download
Traceback (most recent call last):
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/bin/pyhf", line 33, in <module>
sys.exit(load_entry_point('pyhf', 'console_scripts', 'pyhf')())
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/cli/contrib.py", line 60, in download
utils.download(archive_url, output_directory, force, compress)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/contrib/utils.py", line 47, in download
+ "To download an archive from this host use the --force option."
pyhf.exceptions.InvalidArchiveHost: is not an approved archive host: www.hepdata.net, doi.org
To download an archive from this host use the --force option.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/cli/contrib.py`
Content:
```
1 """CLI for functionality that will get migrated out eventually."""
2 import logging
3 import click
4 from pathlib import Path
5
6 from ..contrib import utils
7
8 logging.basicConfig()
9 log = logging.getLogger(__name__)
10
11
12 @click.group(name="contrib")
13 def cli():
14 """
15 Contrib experimental operations.
16
17 .. note::
18
19 Requires installation of the ``contrib`` extra.
20
21 .. code-block:: shell
22
23 $ python -m pip install pyhf[contrib]
24 """
25
26
27 @cli.command()
28 @click.argument("archive-url", default="-")
29 @click.argument("output-directory", default="-")
30 @click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
31 @click.option(
32 "-f", "--force", is_flag=True, help="Force download from non-approved host"
33 )
34 @click.option(
35 "-c",
36 "--compress",
37 is_flag=True,
38 help="Keep the archive in a compressed tar.gz form",
39 )
40 def download(archive_url, output_directory, verbose, force, compress):
41 """
42 Download the patchset archive from the remote URL and extract it in a
43 directory at the path given.
44
45 Example:
46
47 .. code-block:: shell
48
49 $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods
50
51 \b
52 1Lbb-likelihoods/patchset.json
53 1Lbb-likelihoods/README.md
54 1Lbb-likelihoods/BkgOnly.json
55
56 Raises:
57 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
58 """
59 try:
60 utils.download(archive_url, output_directory, force, compress)
61
62 if verbose:
63 file_list = [str(file) for file in list(Path(output_directory).glob("*"))]
64 print("\n".join(file_list))
65 except AttributeError as excep:
66 exception_info = (
67 str(excep)
68 + "\nInstallation of the contrib extra is required to use the contrib CLI API"
69 + "\nPlease install with: python -m pip install pyhf[contrib]\n"
70 )
71 log.error(exception_info)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/cli/contrib.py b/src/pyhf/cli/contrib.py
--- a/src/pyhf/cli/contrib.py
+++ b/src/pyhf/cli/contrib.py
@@ -25,8 +25,8 @@
@cli.command()
[email protected]("archive-url", default="-")
[email protected]("output-directory", default="-")
[email protected]("archive-url")
[email protected]("output-directory")
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
@click.option(
"-f", "--force", is_flag=True, help="Force download from non-approved host"
|
{"golden_diff": "diff --git a/src/pyhf/cli/contrib.py b/src/pyhf/cli/contrib.py\n--- a/src/pyhf/cli/contrib.py\n+++ b/src/pyhf/cli/contrib.py\n@@ -25,8 +25,8 @@\n \n \n @cli.command()\[email protected](\"archive-url\", default=\"-\")\[email protected](\"output-directory\", default=\"-\")\[email protected](\"archive-url\")\[email protected](\"output-directory\")\n @click.option(\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\n @click.option(\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n", "issue": "pyhf contrib download fails gracelessly with invalid URL\n# Description\r\n\r\ncalling `pyhf contrib download` (just to see what it does) fails pretty violently. hould we make itt a bit nicer @matthewfeickert \r\n?\r\n\r\n```\r\npyhf contrib download\r\nTraceback (most recent call last):\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/bin/pyhf\", line 33, in <module>\r\n sys.exit(load_entry_point('pyhf', 'console_scripts', 'pyhf')())\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/cli/contrib.py\", line 60, in download\r\n utils.download(archive_url, output_directory, force, compress)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/contrib/utils.py\", line 47, in download\r\n + \"To download an archive from this host use the --force option.\"\r\npyhf.exceptions.InvalidArchiveHost: is not an approved archive host: www.hepdata.net, doi.org\r\nTo download an archive from this host use the --force option.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom ..contrib import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. 
code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\", default=\"-\")\[email protected](\"output-directory\", default=\"-\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/cli/contrib.py"}], "after_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom ..contrib import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/cli/contrib.py"}]}
| 1,447 | 139 |
gh_patches_debug_36501
|
rasdani/github-patches
|
git_diff
|
avocado-framework__avocado-4225
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
avocado list --resolver doesn't show the reason of failed resolutions
While the functionality of exposing the resolver lived inside the "avocado nlist" command, the reasons for failed resolutions would be displayed, such as:
```
avocado-instrumented /my/file File "/my/file" does not end with ".py"
python-unittest /my/file File "/my/file" does not end with ".py"
exec-test /my/file File "/my/file" does not exist or is not executable
tap /my/file File "/my/file" does not exist or is not executable
```
This is very useful, and should be brought back.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/plugins/list.py`
Content:
```
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9 #
10 # See LICENSE for more details.
11 #
12 # Copyright: Red Hat Inc. 2013-2014
13 # Author: Lucas Meneghel Rodrigues <[email protected]>
14 # Author: Beraldo Leal <[email protected]>
15
16 import os
17
18 from avocado.core import exit_codes, loader, parser_common_args
19 from avocado.core.output import LOG_UI, TERM_SUPPORT
20 from avocado.core.plugin_interfaces import CLICmd
21 from avocado.core.resolver import ReferenceResolutionResult
22 from avocado.core.settings import settings
23 from avocado.core.suite import TestSuite
24 from avocado.core.test import Test
25 from avocado.utils.astring import iter_tabular_output
26
27
28 def _get_test_tags(test):
29 """Return a list of all tags of a test as string."""
30 params = test[1]
31 tags_repr = []
32 for tag, values in params.get('tags', {}).items():
33 if values:
34 tags_repr.append("%s(%s)" % (tag, ",".join(values)))
35 else:
36 tags_repr.append(tag)
37 return ",".join(tags_repr)
38
39
40 class List(CLICmd):
41
42 """
43 Implements the avocado 'list' subcommand
44 """
45
46 name = 'list'
47 description = 'List available tests'
48
49 def _display(self, suite, matrix, resolution=None):
50 header = None
51 verbose = suite.config.get('core.verbose')
52 if verbose:
53 header = (TERM_SUPPORT.header_str('Type'),
54 TERM_SUPPORT.header_str('Test'),
55 TERM_SUPPORT.header_str('Tag(s)'))
56
57 for line in iter_tabular_output(matrix,
58 header=header,
59 strip=True):
60 LOG_UI.debug(line)
61
62 if verbose:
63 LOG_UI.info("")
64 LOG_UI.info("TEST TYPES SUMMARY")
65 LOG_UI.info("==================")
66 for key in sorted(suite.stats):
67 LOG_UI.info("%s: %s", key, suite.stats[key])
68
69 if suite.tags_stats:
70 LOG_UI.info("")
71 LOG_UI.info("TEST TAGS SUMMARY")
72 LOG_UI.info("=================")
73 for key in sorted(suite.tags_stats):
74 LOG_UI.info("%s: %s", key, suite.tags_stats[key])
75
76 if resolution:
77 resolution_header = (TERM_SUPPORT.header_str('Resolver'),
78 TERM_SUPPORT.header_str('Reference'),
79 TERM_SUPPORT.header_str('Info'))
80 LOG_UI.info("")
81 for line in iter_tabular_output(resolution,
82 header=resolution_header,
83 strip=True):
84 LOG_UI.info(line)
85
86 @staticmethod
87 def _get_test_matrix(suite):
88 """Used for loader."""
89 test_matrix = []
90
91 type_label_mapping = loader.loader.get_type_label_mapping()
92 decorator_mapping = loader.loader.get_decorator_mapping()
93
94 verbose = suite.config.get('core.verbose')
95 for cls, params in suite.tests:
96 if isinstance(cls, str):
97 cls = Test
98 type_label = type_label_mapping[cls]
99 decorator = decorator_mapping[cls]
100 type_label = decorator(type_label)
101
102 if verbose:
103 test_matrix.append((type_label,
104 params['name'],
105 _get_test_tags((cls, params))))
106 else:
107 test_matrix.append((type_label, params['name']))
108
109 return test_matrix
110
111 @staticmethod
112 def _get_resolution_matrix(suite):
113 """Used for resolver."""
114 test_matrix = []
115 verbose = suite.config.get('core.verbose')
116 for test in suite.tests:
117 runnable = test.runnable
118
119 type_label = TERM_SUPPORT.healthy_str(runnable.kind)
120
121 if verbose:
122 tags_repr = []
123 tags = runnable.tags or {}
124 for tag, vals in tags.items():
125 if vals:
126 tags_repr.append("%s(%s)" % (tag,
127 ",".join(vals)))
128 else:
129 tags_repr.append(tag)
130 tags_repr = ",".join(tags_repr)
131 test_matrix.append((type_label, runnable.uri, tags_repr))
132 else:
133 test_matrix.append((type_label, runnable.uri))
134 return test_matrix
135
136 @staticmethod
137 def save_recipes(suite, directory, matrix_len):
138 fmt = '%%0%uu.json' % len(str(matrix_len))
139 index = 1
140 for resolution in suite.resolutions:
141 if resolution.result == ReferenceResolutionResult.SUCCESS:
142 for res in resolution.resolutions:
143 res.write_json(os.path.join(directory, fmt % index))
144 index += 1
145
146 def configure(self, parser):
147 """
148 Add the subparser for the list action.
149
150 :param parser: The Avocado command line application parser
151 :type parser: :class:`avocado.core.parser.ArgumentParser`
152 """
153 parser = super(List, self).configure(parser)
154 help_msg = ('List of test references (aliases or paths). If empty, '
155 'Avocado will list tests on the configured test source, '
156 '(see "avocado config --datadir") Also, if there are '
157 'other test loader plugins active, tests from those '
158 'plugins might also show up (behavior may vary among '
159 'plugins)')
160 settings.register_option(section='list',
161 key='references',
162 default=[],
163 nargs='*',
164 key_type=list,
165 help_msg=help_msg,
166 parser=parser,
167 positional_arg=True)
168 loader.add_loader_options(parser, 'list')
169
170 help_msg = ('What is the method used to detect tests? If --resolver '
171 'used, Avocado will use the Next Runner Resolver method. '
172 'If not the legacy one will be used.')
173 settings.register_option(section='list',
174 key='resolver',
175 key_type=bool,
176 default=False,
177 help_msg=help_msg,
178 parser=parser,
179 long_arg='--resolver')
180
181 help_msg = ('Writes runnable recipe files to a directory. Valid only '
182 'when using --resolver.')
183 settings.register_option(section='list.recipes',
184 key='write_to_directory',
185 default=None,
186 metavar='DIRECTORY',
187 help_msg=help_msg,
188 parser=parser,
189 long_arg='--write-recipes-to-directory')
190
191 parser_common_args.add_tag_filter_args(parser)
192
193 def run(self, config):
194 runner = 'nrunner' if config.get('list.resolver') else 'runner'
195 config['run.references'] = config.get('list.references')
196 config['run.ignore_missing_references'] = True
197 config['run.test_runner'] = runner
198 try:
199 suite = TestSuite.from_config(config)
200 if runner == 'nrunner':
201 matrix = self._get_resolution_matrix(suite)
202 self._display(suite, matrix)
203
204 directory = config.get('list.recipes.write_to_directory')
205 if directory is not None:
206 self.save_recipes(suite, directory, len(matrix))
207 else:
208 matrix = self._get_test_matrix(suite)
209 self._display(suite, matrix)
210 except KeyboardInterrupt:
211 LOG_UI.error('Command interrupted by user...')
212 return exit_codes.AVOCADO_FAIL
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/avocado/plugins/list.py b/avocado/plugins/list.py
--- a/avocado/plugins/list.py
+++ b/avocado/plugins/list.py
@@ -46,7 +46,7 @@
name = 'list'
description = 'List available tests'
- def _display(self, suite, matrix, resolution=None):
+ def _display(self, suite, matrix):
header = None
verbose = suite.config.get('core.verbose')
if verbose:
@@ -60,6 +60,32 @@
LOG_UI.debug(line)
if verbose:
+ if suite.resolutions:
+ resolution_header = (TERM_SUPPORT.header_str('Resolver'),
+ TERM_SUPPORT.header_str('Reference'),
+ TERM_SUPPORT.header_str('Info'))
+ LOG_UI.info("")
+
+ mapping = {
+ ReferenceResolutionResult.SUCCESS: TERM_SUPPORT.healthy_str,
+ ReferenceResolutionResult.NOTFOUND: TERM_SUPPORT.fail_header_str,
+ ReferenceResolutionResult.ERROR: TERM_SUPPORT.fail_header_str
+ }
+ resolution_matrix = []
+ for r in suite.resolutions:
+ decorator = mapping.get(r.result,
+ TERM_SUPPORT.warn_header_str)
+ if r.result == ReferenceResolutionResult.SUCCESS:
+ continue
+ resolution_matrix.append((decorator(r.origin),
+ r.reference,
+ r.info or ''))
+
+ for line in iter_tabular_output(resolution_matrix,
+ header=resolution_header,
+ strip=True):
+ LOG_UI.info(line)
+
LOG_UI.info("")
LOG_UI.info("TEST TYPES SUMMARY")
LOG_UI.info("==================")
@@ -73,16 +99,6 @@
for key in sorted(suite.tags_stats):
LOG_UI.info("%s: %s", key, suite.tags_stats[key])
- if resolution:
- resolution_header = (TERM_SUPPORT.header_str('Resolver'),
- TERM_SUPPORT.header_str('Reference'),
- TERM_SUPPORT.header_str('Info'))
- LOG_UI.info("")
- for line in iter_tabular_output(resolution,
- header=resolution_header,
- strip=True):
- LOG_UI.info(line)
-
@staticmethod
def _get_test_matrix(suite):
"""Used for loader."""
|
{"golden_diff": "diff --git a/avocado/plugins/list.py b/avocado/plugins/list.py\n--- a/avocado/plugins/list.py\n+++ b/avocado/plugins/list.py\n@@ -46,7 +46,7 @@\n name = 'list'\n description = 'List available tests'\n \n- def _display(self, suite, matrix, resolution=None):\n+ def _display(self, suite, matrix):\n header = None\n verbose = suite.config.get('core.verbose')\n if verbose:\n@@ -60,6 +60,32 @@\n LOG_UI.debug(line)\n \n if verbose:\n+ if suite.resolutions:\n+ resolution_header = (TERM_SUPPORT.header_str('Resolver'),\n+ TERM_SUPPORT.header_str('Reference'),\n+ TERM_SUPPORT.header_str('Info'))\n+ LOG_UI.info(\"\")\n+\n+ mapping = {\n+ ReferenceResolutionResult.SUCCESS: TERM_SUPPORT.healthy_str,\n+ ReferenceResolutionResult.NOTFOUND: TERM_SUPPORT.fail_header_str,\n+ ReferenceResolutionResult.ERROR: TERM_SUPPORT.fail_header_str\n+ }\n+ resolution_matrix = []\n+ for r in suite.resolutions:\n+ decorator = mapping.get(r.result,\n+ TERM_SUPPORT.warn_header_str)\n+ if r.result == ReferenceResolutionResult.SUCCESS:\n+ continue\n+ resolution_matrix.append((decorator(r.origin),\n+ r.reference,\n+ r.info or ''))\n+\n+ for line in iter_tabular_output(resolution_matrix,\n+ header=resolution_header,\n+ strip=True):\n+ LOG_UI.info(line)\n+\n LOG_UI.info(\"\")\n LOG_UI.info(\"TEST TYPES SUMMARY\")\n LOG_UI.info(\"==================\")\n@@ -73,16 +99,6 @@\n for key in sorted(suite.tags_stats):\n LOG_UI.info(\"%s: %s\", key, suite.tags_stats[key])\n \n- if resolution:\n- resolution_header = (TERM_SUPPORT.header_str('Resolver'),\n- TERM_SUPPORT.header_str('Reference'),\n- TERM_SUPPORT.header_str('Info'))\n- LOG_UI.info(\"\")\n- for line in iter_tabular_output(resolution,\n- header=resolution_header,\n- strip=True):\n- LOG_UI.info(line)\n-\n @staticmethod\n def _get_test_matrix(suite):\n \"\"\"Used for loader.\"\"\"\n", "issue": "avocado list --resolver doesn't show the reason of failed resolutions\nWhile the functionality of exposing the resolver was living inside the \"avocado nlist\" command, the reasons would be displayed on failed resolutions, such as:\r\n\r\n```\r\n avocado-instrumented /my/file File \"/my/file\" does not end with \".py\"\r\n python-unittest /my/file File \"/my/file\" does not end with \".py\"\r\n exec-test /my/file File \"/my/file\" does not exist or is not executable\r\n tap /my/file File \"/my/file\" does not exist or is not executable\r\n```\r\n\r\nThis is very useful, and should be brought back.\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 
2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n# Author: Beraldo Leal <[email protected]>\n\nimport os\n\nfrom avocado.core import exit_codes, loader, parser_common_args\nfrom avocado.core.output import LOG_UI, TERM_SUPPORT\nfrom avocado.core.plugin_interfaces import CLICmd\nfrom avocado.core.resolver import ReferenceResolutionResult\nfrom avocado.core.settings import settings\nfrom avocado.core.suite import TestSuite\nfrom avocado.core.test import Test\nfrom avocado.utils.astring import iter_tabular_output\n\n\ndef _get_test_tags(test):\n \"\"\"Return a list of all tags of a test as string.\"\"\"\n params = test[1]\n tags_repr = []\n for tag, values in params.get('tags', {}).items():\n if values:\n tags_repr.append(\"%s(%s)\" % (tag, \",\".join(values)))\n else:\n tags_repr.append(tag)\n return \",\".join(tags_repr)\n\n\nclass List(CLICmd):\n\n \"\"\"\n Implements the avocado 'list' subcommand\n \"\"\"\n\n name = 'list'\n description = 'List available tests'\n\n def _display(self, suite, matrix, resolution=None):\n header = None\n verbose = suite.config.get('core.verbose')\n if verbose:\n header = (TERM_SUPPORT.header_str('Type'),\n TERM_SUPPORT.header_str('Test'),\n TERM_SUPPORT.header_str('Tag(s)'))\n\n for line in iter_tabular_output(matrix,\n header=header,\n strip=True):\n LOG_UI.debug(line)\n\n if verbose:\n LOG_UI.info(\"\")\n LOG_UI.info(\"TEST TYPES SUMMARY\")\n LOG_UI.info(\"==================\")\n for key in sorted(suite.stats):\n LOG_UI.info(\"%s: %s\", key, suite.stats[key])\n\n if suite.tags_stats:\n LOG_UI.info(\"\")\n LOG_UI.info(\"TEST TAGS SUMMARY\")\n LOG_UI.info(\"=================\")\n for key in sorted(suite.tags_stats):\n LOG_UI.info(\"%s: %s\", key, suite.tags_stats[key])\n\n if resolution:\n resolution_header = (TERM_SUPPORT.header_str('Resolver'),\n TERM_SUPPORT.header_str('Reference'),\n TERM_SUPPORT.header_str('Info'))\n LOG_UI.info(\"\")\n for line in iter_tabular_output(resolution,\n header=resolution_header,\n strip=True):\n LOG_UI.info(line)\n\n @staticmethod\n def _get_test_matrix(suite):\n \"\"\"Used for loader.\"\"\"\n test_matrix = []\n\n type_label_mapping = loader.loader.get_type_label_mapping()\n decorator_mapping = loader.loader.get_decorator_mapping()\n\n verbose = suite.config.get('core.verbose')\n for cls, params in suite.tests:\n if isinstance(cls, str):\n cls = Test\n type_label = type_label_mapping[cls]\n decorator = decorator_mapping[cls]\n type_label = decorator(type_label)\n\n if verbose:\n test_matrix.append((type_label,\n params['name'],\n _get_test_tags((cls, params))))\n else:\n test_matrix.append((type_label, params['name']))\n\n return test_matrix\n\n @staticmethod\n def _get_resolution_matrix(suite):\n \"\"\"Used for resolver.\"\"\"\n test_matrix = []\n verbose = suite.config.get('core.verbose')\n for test in suite.tests:\n runnable = test.runnable\n\n type_label = TERM_SUPPORT.healthy_str(runnable.kind)\n\n if verbose:\n tags_repr = []\n tags = runnable.tags or {}\n for tag, vals in tags.items():\n if vals:\n tags_repr.append(\"%s(%s)\" % (tag,\n \",\".join(vals)))\n else:\n tags_repr.append(tag)\n tags_repr = \",\".join(tags_repr)\n test_matrix.append((type_label, runnable.uri, tags_repr))\n else:\n test_matrix.append((type_label, runnable.uri))\n return test_matrix\n\n @staticmethod\n def save_recipes(suite, directory, matrix_len):\n fmt = '%%0%uu.json' % len(str(matrix_len))\n index = 1\n for resolution in suite.resolutions:\n if resolution.result == ReferenceResolutionResult.SUCCESS:\n for res in 
resolution.resolutions:\n res.write_json(os.path.join(directory, fmt % index))\n index += 1\n\n def configure(self, parser):\n \"\"\"\n Add the subparser for the list action.\n\n :param parser: The Avocado command line application parser\n :type parser: :class:`avocado.core.parser.ArgumentParser`\n \"\"\"\n parser = super(List, self).configure(parser)\n help_msg = ('List of test references (aliases or paths). If empty, '\n 'Avocado will list tests on the configured test source, '\n '(see \"avocado config --datadir\") Also, if there are '\n 'other test loader plugins active, tests from those '\n 'plugins might also show up (behavior may vary among '\n 'plugins)')\n settings.register_option(section='list',\n key='references',\n default=[],\n nargs='*',\n key_type=list,\n help_msg=help_msg,\n parser=parser,\n positional_arg=True)\n loader.add_loader_options(parser, 'list')\n\n help_msg = ('What is the method used to detect tests? If --resolver '\n 'used, Avocado will use the Next Runner Resolver method. '\n 'If not the legacy one will be used.')\n settings.register_option(section='list',\n key='resolver',\n key_type=bool,\n default=False,\n help_msg=help_msg,\n parser=parser,\n long_arg='--resolver')\n\n help_msg = ('Writes runnable recipe files to a directory. Valid only '\n 'when using --resolver.')\n settings.register_option(section='list.recipes',\n key='write_to_directory',\n default=None,\n metavar='DIRECTORY',\n help_msg=help_msg,\n parser=parser,\n long_arg='--write-recipes-to-directory')\n\n parser_common_args.add_tag_filter_args(parser)\n\n def run(self, config):\n runner = 'nrunner' if config.get('list.resolver') else 'runner'\n config['run.references'] = config.get('list.references')\n config['run.ignore_missing_references'] = True\n config['run.test_runner'] = runner\n try:\n suite = TestSuite.from_config(config)\n if runner == 'nrunner':\n matrix = self._get_resolution_matrix(suite)\n self._display(suite, matrix)\n\n directory = config.get('list.recipes.write_to_directory')\n if directory is not None:\n self.save_recipes(suite, directory, len(matrix))\n else:\n matrix = self._get_test_matrix(suite)\n self._display(suite, matrix)\n except KeyboardInterrupt:\n LOG_UI.error('Command interrupted by user...')\n return exit_codes.AVOCADO_FAIL\n", "path": "avocado/plugins/list.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 
2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n# Author: Beraldo Leal <[email protected]>\n\nimport os\n\nfrom avocado.core import exit_codes, loader, parser_common_args\nfrom avocado.core.output import LOG_UI, TERM_SUPPORT\nfrom avocado.core.plugin_interfaces import CLICmd\nfrom avocado.core.resolver import ReferenceResolutionResult\nfrom avocado.core.settings import settings\nfrom avocado.core.suite import TestSuite\nfrom avocado.core.test import Test\nfrom avocado.utils.astring import iter_tabular_output\n\n\ndef _get_test_tags(test):\n \"\"\"Return a list of all tags of a test as string.\"\"\"\n params = test[1]\n tags_repr = []\n for tag, values in params.get('tags', {}).items():\n if values:\n tags_repr.append(\"%s(%s)\" % (tag, \",\".join(values)))\n else:\n tags_repr.append(tag)\n return \",\".join(tags_repr)\n\n\nclass List(CLICmd):\n\n \"\"\"\n Implements the avocado 'list' subcommand\n \"\"\"\n\n name = 'list'\n description = 'List available tests'\n\n def _display(self, suite, matrix):\n header = None\n verbose = suite.config.get('core.verbose')\n if verbose:\n header = (TERM_SUPPORT.header_str('Type'),\n TERM_SUPPORT.header_str('Test'),\n TERM_SUPPORT.header_str('Tag(s)'))\n\n for line in iter_tabular_output(matrix,\n header=header,\n strip=True):\n LOG_UI.debug(line)\n\n if verbose:\n if suite.resolutions:\n resolution_header = (TERM_SUPPORT.header_str('Resolver'),\n TERM_SUPPORT.header_str('Reference'),\n TERM_SUPPORT.header_str('Info'))\n LOG_UI.info(\"\")\n\n mapping = {\n ReferenceResolutionResult.SUCCESS: TERM_SUPPORT.healthy_str,\n ReferenceResolutionResult.NOTFOUND: TERM_SUPPORT.fail_header_str,\n ReferenceResolutionResult.ERROR: TERM_SUPPORT.fail_header_str\n }\n resolution_matrix = []\n for r in suite.resolutions:\n decorator = mapping.get(r.result,\n TERM_SUPPORT.warn_header_str)\n if r.result == ReferenceResolutionResult.SUCCESS:\n continue\n resolution_matrix.append((decorator(r.origin),\n r.reference,\n r.info or ''))\n\n for line in iter_tabular_output(resolution_matrix,\n header=resolution_header,\n strip=True):\n LOG_UI.info(line)\n\n LOG_UI.info(\"\")\n LOG_UI.info(\"TEST TYPES SUMMARY\")\n LOG_UI.info(\"==================\")\n for key in sorted(suite.stats):\n LOG_UI.info(\"%s: %s\", key, suite.stats[key])\n\n if suite.tags_stats:\n LOG_UI.info(\"\")\n LOG_UI.info(\"TEST TAGS SUMMARY\")\n LOG_UI.info(\"=================\")\n for key in sorted(suite.tags_stats):\n LOG_UI.info(\"%s: %s\", key, suite.tags_stats[key])\n\n @staticmethod\n def _get_test_matrix(suite):\n \"\"\"Used for loader.\"\"\"\n test_matrix = []\n\n type_label_mapping = loader.loader.get_type_label_mapping()\n decorator_mapping = loader.loader.get_decorator_mapping()\n\n verbose = suite.config.get('core.verbose')\n for cls, params in suite.tests:\n if isinstance(cls, str):\n cls = Test\n type_label = type_label_mapping[cls]\n decorator = decorator_mapping[cls]\n type_label = decorator(type_label)\n\n if verbose:\n test_matrix.append((type_label,\n params['name'],\n _get_test_tags((cls, params))))\n else:\n test_matrix.append((type_label, params['name']))\n\n return test_matrix\n\n @staticmethod\n def _get_resolution_matrix(suite):\n \"\"\"Used for resolver.\"\"\"\n test_matrix = []\n verbose = suite.config.get('core.verbose')\n for test in suite.tests:\n runnable = test.runnable\n\n type_label = TERM_SUPPORT.healthy_str(runnable.kind)\n\n if verbose:\n tags_repr = []\n tags = runnable.tags or {}\n for tag, vals in tags.items():\n if vals:\n tags_repr.append(\"%s(%s)\" % 
(tag,\n \",\".join(vals)))\n else:\n tags_repr.append(tag)\n tags_repr = \",\".join(tags_repr)\n test_matrix.append((type_label, runnable.uri, tags_repr))\n else:\n test_matrix.append((type_label, runnable.uri))\n return test_matrix\n\n @staticmethod\n def save_recipes(suite, directory, matrix_len):\n fmt = '%%0%uu.json' % len(str(matrix_len))\n index = 1\n for resolution in suite.resolutions:\n if resolution.result == ReferenceResolutionResult.SUCCESS:\n for res in resolution.resolutions:\n res.write_json(os.path.join(directory, fmt % index))\n index += 1\n\n def configure(self, parser):\n \"\"\"\n Add the subparser for the list action.\n\n :param parser: The Avocado command line application parser\n :type parser: :class:`avocado.core.parser.ArgumentParser`\n \"\"\"\n parser = super(List, self).configure(parser)\n help_msg = ('List of test references (aliases or paths). If empty, '\n 'Avocado will list tests on the configured test source, '\n '(see \"avocado config --datadir\") Also, if there are '\n 'other test loader plugins active, tests from those '\n 'plugins might also show up (behavior may vary among '\n 'plugins)')\n settings.register_option(section='list',\n key='references',\n default=[],\n nargs='*',\n key_type=list,\n help_msg=help_msg,\n parser=parser,\n positional_arg=True)\n loader.add_loader_options(parser, 'list')\n\n help_msg = ('What is the method used to detect tests? If --resolver '\n 'used, Avocado will use the Next Runner Resolver method. '\n 'If not the legacy one will be used.')\n settings.register_option(section='list',\n key='resolver',\n key_type=bool,\n default=False,\n help_msg=help_msg,\n parser=parser,\n long_arg='--resolver')\n\n help_msg = ('Writes runnable recipe files to a directory. Valid only '\n 'when using --resolver.')\n settings.register_option(section='list.recipes',\n key='write_to_directory',\n default=None,\n metavar='DIRECTORY',\n help_msg=help_msg,\n parser=parser,\n long_arg='--write-recipes-to-directory')\n\n parser_common_args.add_tag_filter_args(parser)\n\n def run(self, config):\n runner = 'nrunner' if config.get('list.resolver') else 'runner'\n config['run.references'] = config.get('list.references')\n config['run.ignore_missing_references'] = True\n config['run.test_runner'] = runner\n try:\n suite = TestSuite.from_config(config)\n if runner == 'nrunner':\n matrix = self._get_resolution_matrix(suite)\n self._display(suite, matrix)\n\n directory = config.get('list.recipes.write_to_directory')\n if directory is not None:\n self.save_recipes(suite, directory, len(matrix))\n else:\n matrix = self._get_test_matrix(suite)\n self._display(suite, matrix)\n except KeyboardInterrupt:\n LOG_UI.error('Command interrupted by user...')\n return exit_codes.AVOCADO_FAIL\n", "path": "avocado/plugins/list.py"}]}
| 2,495 | 481 |
gh_patches_debug_11587
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-967
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
no loader for url scheme: postgresql
**Small description**
When attempting to start visidata with
```
vd postgresql:///localdb
```
it fails with
```
Error: no loader for url scheme: postgresql
```
**Expected result**
I would have expected it to work the same way
```
vd postgres:///localdb
```
works, as [both URL schemes are valid](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
P.S.: This is somewhere in between a bug and a feature request. As it's super small and about something existing, I called it a bug. I will raise a PR to fix this shortly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/postgres.py`
Content:
```
1 from visidata import *
2
3 __all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']
4
5 option('postgres_schema', 'public', 'The desired schema for the Postgres database')
6
7 def codeToType(type_code, colname):
8 import psycopg2
9 try:
10 tname = psycopg2._psycopg.string_types[type_code].name
11 if 'INTEGER' in tname:
12 return int
13 if 'STRING' in tname:
14 return str
15 except KeyError:
16 vd.status('unknown postgres type_code %s for %s' % (type_code, colname))
17 return anytype
18
19
20 def openurl_rds(url, filetype=None):
21 import boto3
22 import psycopg2
23
24 rds = boto3.client('rds')
25 url = urlparse(url.given)
26
27 _, region, dbname = url.path.split('/')
28 token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)
29
30 conn = psycopg2.connect(
31 user=url.username,
32 dbname=dbname,
33 host=url.hostname,
34 port=url.port,
35 password=token)
36
37 return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
38
39
40 def openurl_postgres(url, filetype=None):
41 import psycopg2
42
43 url = urlparse(url.given)
44 dbname = url.path[1:]
45 conn = psycopg2.connect(
46 user=url.username,
47 dbname=dbname,
48 host=url.hostname,
49 port=url.port,
50 password=url.password)
51
52 return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
53
54
55 class SQL:
56 def __init__(self, conn):
57 self.conn = conn
58
59 def cur(self, qstr):
60 import string
61 randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
62 cur = self.conn.cursor(randomname)
63 cur.execute(qstr)
64 return cur
65
66 @asyncthread
67 def query_async(self, qstr, callback=None):
68 with self.cur(qstr) as cur:
69 callback(cur)
70 cur.close()
71
72
73 def cursorToColumns(cur, sheet):
74 sheet.columns = []
75 for i, coldesc in enumerate(cur.description):
76 sheet.addColumn(ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name)))
77
78
79 # rowdef: (table_name, ncols)
80 class PgTablesSheet(Sheet):
81 rowtype = 'tables'
82
83 def reload(self):
84 schema = options.postgres_schema
85 qstr = f'''
86 SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows
87 FROM pg_class, pg_namespace, (
88 SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = '{schema}' GROUP BY table_name
89 ) AS column_count
90 WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;
91 '''
92
93 with self.sql.cur(qstr) as cur:
94 self.nrowsPerTable = {}
95
96 self.rows = []
97 # try to get first row to make cur.description available
98 r = cur.fetchone()
99 if r:
100 self.addRow(r)
101 cursorToColumns(cur, self)
102 self.setKeys(self.columns[0:1]) # table_name is the key
103
104 for r in cur:
105 self.addRow(r)
106
107 def openRow(self, row):
108 return PgTable(self.name+"."+row[0], source=row[0], sql=self.sql)
109
110
111 # rowdef: tuple of values as returned by fetchone()
112 class PgTable(Sheet):
113 @asyncthread
114 def reload(self):
115 if self.options.postgres_schema:
116 source = f"{self.options.postgres_schema}.{self.source}"
117 else:
118 source = self.source
119 with self.sql.cur(f"SELECT * FROM {source}") as cur:
120 self.rows = []
121 r = cur.fetchone()
122 if r:
123 self.addRow(r)
124 cursorToColumns(cur, self)
125 for r in cur:
126 self.addRow(r)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/visidata/loaders/postgres.py b/visidata/loaders/postgres.py
--- a/visidata/loaders/postgres.py
+++ b/visidata/loaders/postgres.py
@@ -1,6 +1,6 @@
from visidata import *
-__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']
+__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']
option('postgres_schema', 'public', 'The desired schema for the Postgres database')
@@ -52,6 +52,9 @@
return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
+openurl_postgresql=openurl_postgres
+
+
class SQL:
def __init__(self, conn):
self.conn = conn
|
{"golden_diff": "diff --git a/visidata/loaders/postgres.py b/visidata/loaders/postgres.py\n--- a/visidata/loaders/postgres.py\n+++ b/visidata/loaders/postgres.py\n@@ -1,6 +1,6 @@\n from visidata import *\n \n-__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n+__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n \n option('postgres_schema', 'public', 'The desired schema for the Postgres database')\n \n@@ -52,6 +52,9 @@\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n \n \n+openurl_postgresql=openurl_postgres\n+\n+\n class SQL:\n def __init__(self, conn):\n self.conn = conn\n", "issue": "no loader for url scheme: postgresql\n**Small description**\r\n\r\nWhen attempting to start visidata with\r\n```\r\nvd postgresql:///localdb\r\n```\r\nit fails with\r\n```\r\nError: no loader for url scheme: postgresql\r\n```\r\n\r\n**Expected result**\r\n\r\nI would have expected it to work the same way \r\n```\r\nvd postgres:///localdb\r\n```\r\nworks, as [both URL schemes are valid](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).\r\n\r\nP.S.: This is somewhere in between a bug and a feature request. As it's super small and about something existing, I called it a bug. I will raise a PR to fix this shortly.\n", "before_files": [{"content": "from visidata import *\n\n__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n\noption('postgres_schema', 'public', 'The desired schema for the Postgres database')\n\ndef codeToType(type_code, colname):\n import psycopg2\n try:\n tname = psycopg2._psycopg.string_types[type_code].name\n if 'INTEGER' in tname:\n return int\n if 'STRING' in tname:\n return str\n except KeyError:\n vd.status('unknown postgres type_code %s for %s' % (type_code, colname))\n return anytype\n\n\ndef openurl_rds(url, filetype=None):\n import boto3\n import psycopg2\n\n rds = boto3.client('rds')\n url = urlparse(url.given)\n\n _, region, dbname = url.path.split('/')\n token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)\n\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=token)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\ndef openurl_postgres(url, filetype=None):\n import psycopg2\n\n url = urlparse(url.given)\n dbname = url.path[1:]\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=url.password)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\nclass SQL:\n def __init__(self, conn):\n self.conn = conn\n\n def cur(self, qstr):\n import string\n randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))\n cur = self.conn.cursor(randomname)\n cur.execute(qstr)\n return cur\n\n @asyncthread\n def query_async(self, qstr, callback=None):\n with self.cur(qstr) as cur:\n callback(cur)\n cur.close()\n\n\ndef cursorToColumns(cur, sheet):\n sheet.columns = []\n for i, coldesc in enumerate(cur.description):\n sheet.addColumn(ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name)))\n\n\n# rowdef: (table_name, ncols)\nclass PgTablesSheet(Sheet):\n rowtype = 'tables'\n\n def reload(self):\n schema = options.postgres_schema\n qstr = f'''\n SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows\n FROM pg_class, pg_namespace, (\n SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = 
'{schema}' GROUP BY table_name\n ) AS column_count\n WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;\n '''\n\n with self.sql.cur(qstr) as cur:\n self.nrowsPerTable = {}\n\n self.rows = []\n # try to get first row to make cur.description available\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n self.setKeys(self.columns[0:1]) # table_name is the key\n\n for r in cur:\n self.addRow(r)\n\n def openRow(self, row):\n return PgTable(self.name+\".\"+row[0], source=row[0], sql=self.sql)\n\n\n# rowdef: tuple of values as returned by fetchone()\nclass PgTable(Sheet):\n @asyncthread\n def reload(self):\n if self.options.postgres_schema:\n source = f\"{self.options.postgres_schema}.{self.source}\"\n else:\n source = self.source\n with self.sql.cur(f\"SELECT * FROM {source}\") as cur:\n self.rows = []\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n for r in cur:\n self.addRow(r)\n", "path": "visidata/loaders/postgres.py"}], "after_files": [{"content": "from visidata import *\n\n__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n\noption('postgres_schema', 'public', 'The desired schema for the Postgres database')\n\ndef codeToType(type_code, colname):\n import psycopg2\n try:\n tname = psycopg2._psycopg.string_types[type_code].name\n if 'INTEGER' in tname:\n return int\n if 'STRING' in tname:\n return str\n except KeyError:\n vd.status('unknown postgres type_code %s for %s' % (type_code, colname))\n return anytype\n\n\ndef openurl_rds(url, filetype=None):\n import boto3\n import psycopg2\n\n rds = boto3.client('rds')\n url = urlparse(url.given)\n\n _, region, dbname = url.path.split('/')\n token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)\n\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=token)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\ndef openurl_postgres(url, filetype=None):\n import psycopg2\n\n url = urlparse(url.given)\n dbname = url.path[1:]\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=url.password)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\nopenurl_postgresql=openurl_postgres\n\n\nclass SQL:\n def __init__(self, conn):\n self.conn = conn\n\n def cur(self, qstr):\n import string\n randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))\n cur = self.conn.cursor(randomname)\n cur.execute(qstr)\n return cur\n\n @asyncthread\n def query_async(self, qstr, callback=None):\n with self.cur(qstr) as cur:\n callback(cur)\n cur.close()\n\n\ndef cursorToColumns(cur, sheet):\n sheet.columns = []\n for i, coldesc in enumerate(cur.description):\n sheet.addColumn(ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name)))\n\n\n# rowdef: (table_name, ncols)\nclass PgTablesSheet(Sheet):\n rowtype = 'tables'\n\n def reload(self):\n schema = options.postgres_schema\n qstr = f'''\n SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows\n FROM pg_class, pg_namespace, (\n SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = '{schema}' GROUP BY table_name\n ) AS column_count\n WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;\n '''\n\n with self.sql.cur(qstr) 
as cur:\n self.nrowsPerTable = {}\n\n self.rows = []\n # try to get first row to make cur.description available\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n self.setKeys(self.columns[0:1]) # table_name is the key\n\n for r in cur:\n self.addRow(r)\n\n def openRow(self, row):\n return PgTable(self.name+\".\"+row[0], source=row[0], sql=self.sql)\n\n\n# rowdef: tuple of values as returned by fetchone()\nclass PgTable(Sheet):\n @asyncthread\n def reload(self):\n if self.options.postgres_schema:\n source = f\"{self.options.postgres_schema}.{self.source}\"\n else:\n source = self.source\n with self.sql.cur(f\"SELECT * FROM {source}\") as cur:\n self.rows = []\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n for r in cur:\n self.addRow(r)\n", "path": "visidata/loaders/postgres.py"}]}
| 1,578 | 196 |
gh_patches_debug_8802
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-1401
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make better icons for all the rich-text editor components
```
The icon for the Collapsible widget in the rich-text editor keeps confusing me
since it looks more like a folder containing a file than anything else.
It would be nice if we could replace it with something more apposite, perhaps a
plus sign with a light border around it?
```
Original issue reported on code.google.com by `[email protected]` on 6 Nov 2014 at 6:02
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `extensions/rich_text_components/Video/Video.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from extensions.rich_text_components import base
18
19
20 NONNEGATIVE_INT_SCHEMA = {
21 'type': 'int',
22 'validators': [{
23 'id': 'is_at_least',
24 'min_value': 0
25 }],
26 }
27
28
29 class Video(base.BaseRichTextComponent):
30 """A rich-text component representing a YouTube video."""
31
32 name = 'Video'
33 category = 'Basic Input'
34 description = 'A YouTube video.'
35 frontend_name = 'video'
36 tooltip = 'Insert video'
37
38 _customization_arg_specs = [{
39 'name': 'video_id',
40 'description': (
41 'The YouTube id for this video. This is the 11-character string '
42 'after \'v=\' in the video URL.'),
43 'schema': {
44 'type': 'unicode',
45 },
46 'default_value': '',
47 }, {
48 'name': 'start',
49 'description': (
50 'Video start time in seconds: (leave at 0 to start at the '
51 'beginning.)'),
52 'schema': NONNEGATIVE_INT_SCHEMA,
53 'default_value': 0
54 }, {
55 'name': 'end',
56 'description': (
57 'Video end time in seconds: (leave at 0 to play until the end.)'),
58 'schema': NONNEGATIVE_INT_SCHEMA,
59 'default_value': 0
60 }, {
61 'name': 'autoplay',
62 'description': (
63 'Autoplay this video once the question has loaded?'),
64 'schema': {
65 'type': 'bool'
66 },
67 'default_value': False,
68 }]
69
```
Path: `extensions/rich_text_components/Tabs/Tabs.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from extensions.rich_text_components import base
18
19
20 TAB_CONTENT_SCHEMA = {
21 'type': 'dict',
22 'properties': [{
23 'name': 'title',
24 'description': 'Tab title',
25 'schema': {
26 'type': 'unicode',
27 'validators': [{
28 'id': 'is_nonempty'
29 }]
30 }
31 }, {
32 'name': 'content',
33 'description': 'Tab content',
34 'schema': {
35 'type': 'html',
36 'ui_config': {
37 'hide_complex_extensions': True,
38 }
39
40 }
41 }]
42 }
43
44
45 class Tabs(base.BaseRichTextComponent):
46 """A rich-text component representing a series of tabs."""
47
48 name = 'Tabs'
49 category = 'Basic Input'
50 description = 'A series of tabs.'
51 frontend_name = 'tabs'
52 tooltip = 'Insert tabs (e.g. for hints)'
53 is_complex = True
54
55 _customization_arg_specs = [{
56 'name': 'tab_contents',
57 'description': 'The tab titles and contents.',
58 'schema': {
59 'type': 'list',
60 'items': TAB_CONTENT_SCHEMA,
61 'ui_config': {
62 'add_element_text': 'Add new tab'
63 }
64 },
65 'default_value': [{
66 'title': 'Hint introduction',
67 'content': ('This set of tabs shows some hints. Click on the '
68 'other tabs to display the relevant hints.')
69 }, {
70 'title': 'Hint 1',
71 'content': 'This is a first hint.'
72 }],
73 }]
74
```
Path: `extensions/rich_text_components/Collapsible/Collapsible.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from extensions.rich_text_components import base
18
19
20 class Collapsible(base.BaseRichTextComponent):
21 """A rich-text component representing a collapsible block."""
22
23 name = 'Collapsible'
24 category = 'Basic Input'
25 description = 'A collapsible block of HTML.'
26 frontend_name = 'collapsible'
27 tooltip = 'Insert collapsible block'
28 is_complex = True
29
30 _customization_arg_specs = [{
31 'name': 'heading',
32 'description': 'The heading for the collapsible block',
33 'schema': {
34 'type': 'unicode',
35 },
36 'default_value': 'Sample Header',
37 }, {
38 'name': 'content',
39 'description': 'The content of the collapsible block',
40 'schema': {
41 'type': 'html',
42 'ui_config': {
43 'hide_complex_extensions': True,
44 }
45 },
46 'default_value': 'You have opened the collapsible block.'
47 }]
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/extensions/rich_text_components/Collapsible/Collapsible.py b/extensions/rich_text_components/Collapsible/Collapsible.py
--- a/extensions/rich_text_components/Collapsible/Collapsible.py
+++ b/extensions/rich_text_components/Collapsible/Collapsible.py
@@ -45,3 +45,4 @@
},
'default_value': 'You have opened the collapsible block.'
}]
+
diff --git a/extensions/rich_text_components/Tabs/Tabs.py b/extensions/rich_text_components/Tabs/Tabs.py
--- a/extensions/rich_text_components/Tabs/Tabs.py
+++ b/extensions/rich_text_components/Tabs/Tabs.py
@@ -71,3 +71,4 @@
'content': 'This is a first hint.'
}],
}]
+
diff --git a/extensions/rich_text_components/Video/Video.py b/extensions/rich_text_components/Video/Video.py
--- a/extensions/rich_text_components/Video/Video.py
+++ b/extensions/rich_text_components/Video/Video.py
@@ -66,3 +66,4 @@
},
'default_value': False,
}]
+
|
{"golden_diff": "diff --git a/extensions/rich_text_components/Collapsible/Collapsible.py b/extensions/rich_text_components/Collapsible/Collapsible.py\n--- a/extensions/rich_text_components/Collapsible/Collapsible.py\n+++ b/extensions/rich_text_components/Collapsible/Collapsible.py\n@@ -45,3 +45,4 @@\n },\n 'default_value': 'You have opened the collapsible block.'\n }]\n+\ndiff --git a/extensions/rich_text_components/Tabs/Tabs.py b/extensions/rich_text_components/Tabs/Tabs.py\n--- a/extensions/rich_text_components/Tabs/Tabs.py\n+++ b/extensions/rich_text_components/Tabs/Tabs.py\n@@ -71,3 +71,4 @@\n 'content': 'This is a first hint.'\n }],\n }]\n+\ndiff --git a/extensions/rich_text_components/Video/Video.py b/extensions/rich_text_components/Video/Video.py\n--- a/extensions/rich_text_components/Video/Video.py\n+++ b/extensions/rich_text_components/Video/Video.py\n@@ -66,3 +66,4 @@\n },\n 'default_value': False,\n }]\n+\n", "issue": "Make better icons for all the rich-text editor components\n```\nThe icon for the Collapsible widget in the rich-text editor keeps confusing me \nsince it looks more like a folder containing a file than anything else.\n\nIt would be nice if we could replace it with something more apposite, perhaps a \nplus sign with a light border around it?\n```\n\nOriginal issue reported on code.google.com by `[email protected]` on 6 Nov 2014 at 6:02\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nNONNEGATIVE_INT_SCHEMA = {\n 'type': 'int',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': 0\n }],\n}\n\n\nclass Video(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a YouTube video.\"\"\"\n\n name = 'Video'\n category = 'Basic Input'\n description = 'A YouTube video.'\n frontend_name = 'video'\n tooltip = 'Insert video'\n\n _customization_arg_specs = [{\n 'name': 'video_id',\n 'description': (\n 'The YouTube id for this video. This is the 11-character string '\n 'after \\'v=\\' in the video URL.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'start',\n 'description': (\n 'Video start time in seconds: (leave at 0 to start at the '\n 'beginning.)'),\n 'schema': NONNEGATIVE_INT_SCHEMA,\n 'default_value': 0\n }, {\n 'name': 'end',\n 'description': (\n 'Video end time in seconds: (leave at 0 to play until the end.)'),\n 'schema': NONNEGATIVE_INT_SCHEMA,\n 'default_value': 0\n }, {\n 'name': 'autoplay',\n 'description': (\n 'Autoplay this video once the question has loaded?'),\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Video/Video.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nTAB_CONTENT_SCHEMA = {\n 'type': 'dict',\n 'properties': [{\n 'name': 'title',\n 'description': 'Tab title',\n 'schema': {\n 'type': 'unicode',\n 'validators': [{\n 'id': 'is_nonempty'\n }]\n }\n }, {\n 'name': 'content',\n 'description': 'Tab content',\n 'schema': {\n 'type': 'html',\n 'ui_config': {\n 'hide_complex_extensions': True,\n }\n\n }\n }]\n}\n\n\nclass Tabs(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a series of tabs.\"\"\"\n\n name = 'Tabs'\n category = 'Basic Input'\n description = 'A series of tabs.'\n frontend_name = 'tabs'\n tooltip = 'Insert tabs (e.g. for hints)'\n is_complex = True\n\n _customization_arg_specs = [{\n 'name': 'tab_contents',\n 'description': 'The tab titles and contents.',\n 'schema': {\n 'type': 'list',\n 'items': TAB_CONTENT_SCHEMA,\n 'ui_config': {\n 'add_element_text': 'Add new tab'\n }\n },\n 'default_value': [{\n 'title': 'Hint introduction',\n 'content': ('This set of tabs shows some hints. Click on the '\n 'other tabs to display the relevant hints.')\n }, {\n 'title': 'Hint 1',\n 'content': 'This is a first hint.'\n }],\n }]\n", "path": "extensions/rich_text_components/Tabs/Tabs.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Collapsible(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a collapsible block.\"\"\"\n\n name = 'Collapsible'\n category = 'Basic Input'\n description = 'A collapsible block of HTML.'\n frontend_name = 'collapsible'\n tooltip = 'Insert collapsible block'\n is_complex = True\n\n _customization_arg_specs = [{\n 'name': 'heading',\n 'description': 'The heading for the collapsible block',\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': 'Sample Header',\n }, {\n 'name': 'content',\n 'description': 'The content of the collapsible block',\n 'schema': {\n 'type': 'html',\n 'ui_config': {\n 'hide_complex_extensions': True,\n }\n },\n 'default_value': 'You have opened the collapsible block.'\n }]\n", "path": "extensions/rich_text_components/Collapsible/Collapsible.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nNONNEGATIVE_INT_SCHEMA = {\n 'type': 'int',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': 0\n }],\n}\n\n\nclass Video(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a YouTube video.\"\"\"\n\n name = 'Video'\n category = 'Basic Input'\n description = 'A YouTube video.'\n frontend_name = 'video'\n tooltip = 'Insert video'\n\n _customization_arg_specs = [{\n 'name': 'video_id',\n 'description': (\n 'The YouTube id for this video. This is the 11-character string '\n 'after \\'v=\\' in the video URL.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'start',\n 'description': (\n 'Video start time in seconds: (leave at 0 to start at the '\n 'beginning.)'),\n 'schema': NONNEGATIVE_INT_SCHEMA,\n 'default_value': 0\n }, {\n 'name': 'end',\n 'description': (\n 'Video end time in seconds: (leave at 0 to play until the end.)'),\n 'schema': NONNEGATIVE_INT_SCHEMA,\n 'default_value': 0\n }, {\n 'name': 'autoplay',\n 'description': (\n 'Autoplay this video once the question has loaded?'),\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n\n", "path": "extensions/rich_text_components/Video/Video.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nTAB_CONTENT_SCHEMA = {\n 'type': 'dict',\n 'properties': [{\n 'name': 'title',\n 'description': 'Tab title',\n 'schema': {\n 'type': 'unicode',\n 'validators': [{\n 'id': 'is_nonempty'\n }]\n }\n }, {\n 'name': 'content',\n 'description': 'Tab content',\n 'schema': {\n 'type': 'html',\n 'ui_config': {\n 'hide_complex_extensions': True,\n }\n\n }\n }]\n}\n\n\nclass Tabs(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a series of tabs.\"\"\"\n\n name = 'Tabs'\n category = 'Basic Input'\n description = 'A series of tabs.'\n frontend_name = 'tabs'\n tooltip = 'Insert tabs (e.g. for hints)'\n is_complex = True\n\n _customization_arg_specs = [{\n 'name': 'tab_contents',\n 'description': 'The tab titles and contents.',\n 'schema': {\n 'type': 'list',\n 'items': TAB_CONTENT_SCHEMA,\n 'ui_config': {\n 'add_element_text': 'Add new tab'\n }\n },\n 'default_value': [{\n 'title': 'Hint introduction',\n 'content': ('This set of tabs shows some hints. 
Click on the '\n 'other tabs to display the relevant hints.')\n }, {\n 'title': 'Hint 1',\n 'content': 'This is a first hint.'\n }],\n }]\n\n", "path": "extensions/rich_text_components/Tabs/Tabs.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Collapsible(base.BaseRichTextComponent):\n \"\"\"A rich-text component representing a collapsible block.\"\"\"\n\n name = 'Collapsible'\n category = 'Basic Input'\n description = 'A collapsible block of HTML.'\n frontend_name = 'collapsible'\n tooltip = 'Insert collapsible block'\n is_complex = True\n\n _customization_arg_specs = [{\n 'name': 'heading',\n 'description': 'The heading for the collapsible block',\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': 'Sample Header',\n }, {\n 'name': 'content',\n 'description': 'The content of the collapsible block',\n 'schema': {\n 'type': 'html',\n 'ui_config': {\n 'hide_complex_extensions': True,\n }\n },\n 'default_value': 'You have opened the collapsible block.'\n }]\n\n", "path": "extensions/rich_text_components/Collapsible/Collapsible.py"}]}
| 2,081 | 247 |
gh_patches_debug_25004
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-3592
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`GridSampler` does not stop the optimization when NaN in the `search_space`
### Expected behavior
`GridSampler` should stop the optimization when all grids are evaluated.
### Environment
- Optuna version: 3.0.0b1.dev
- Python version: 3.8.6
- OS: macOS-10.16-x86_64-i386-64bit
- (Optional) Other libraries and their versions:
### Error messages, stack traces, or logs
```shell
See steps to reproduce.
```
### Steps to reproduce
In the following code, `optimize` should stop with 2 trials, but it infinitely creates trials.
```python
import optuna
sampler = optuna.samplers.GridSampler({"x": [0, float("nan")]})
study = optuna.create_study(sampler=sampler)
study.optimize(lambda _: 0)
```
### Additional context (optional)
The bug is coded here.
https://github.com/optuna/optuna/blob/c23e17c70bd77489e223f3d4c322d86ec5a6ec66/optuna/samplers/_grid.py#L270
If the `param_value` is NaN, this condition is wrong.
```python
float("nan") != float("nan") # True
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/samplers/_grid.py`
Content:
```
1 import collections
2 import itertools
3 from typing import Any
4 from typing import Dict
5 from typing import List
6 from typing import Mapping
7 from typing import Optional
8 from typing import Sequence
9 from typing import Union
10 import warnings
11
12 import numpy as np
13
14 from optuna.distributions import BaseDistribution
15 from optuna.logging import get_logger
16 from optuna.samplers import BaseSampler
17 from optuna.study import Study
18 from optuna.trial import FrozenTrial
19 from optuna.trial import TrialState
20
21
22 GridValueType = Union[str, float, int, bool, None]
23
24
25 _logger = get_logger(__name__)
26
27
28 class GridSampler(BaseSampler):
29 """Sampler using grid search.
30
31 With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters
32 in the given search space during the study.
33
34 Example:
35
36 .. testcode::
37
38 import optuna
39
40
41 def objective(trial):
42 x = trial.suggest_float("x", -100, 100)
43 y = trial.suggest_int("y", -100, 100)
44 return x**2 + y**2
45
46
47 search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
48 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
49 study.optimize(objective)
50
51 Note:
52
53 :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all
54 combinations in the passed ``search_space`` have already been evaluated, internally
55 invoking the :func:`~optuna.study.Study.stop` method.
56
57 Note:
58
59 :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization
60 specified by discrete suggest methods but just samples one of values specified in the
61 search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is
62 sampled as ``x`` instead of an integer point.
63
64 .. testcode::
65
66 import optuna
67
68
69 def objective(trial):
70 # The following suggest method specifies integer points between -5 and 5.
71 x = trial.suggest_float("x", -5, 5, step=1)
72 return x**2
73
74
75 # Non-int points are specified in the grid.
76 search_space = {"x": [-0.5, 0.5]}
77 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
78 study.optimize(objective, n_trials=2)
79
80 Note:
81 A parameter configuration in the grid is not considered finished until its trial is
82 finished. Therefore, during distributed optimization where trials run concurrently,
83 different workers will occasionally suggest the same parameter configuration.
84 The total number of actual trials may therefore exceed the size of the grid.
85
86 Note:
87 All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with
88 :meth:`~optuna.study.Study.enqueue_trial`.
89
90 Args:
91 search_space:
92 A dictionary whose key and value are a parameter name and the corresponding candidates
93 of values, respectively.
94 seed:
95 A seed to fix the order of trials as the grid is randomly shuffled. Please note that
96 it is not recommended using this option in distributed optimization settings since
97 this option cannot ensure the order of trials and may increase the number of duplicate
98 suggestions during distributed optimization.
99 """
100
101 def __init__(
102 self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None
103 ) -> None:
104
105 for param_name, param_values in search_space.items():
106 for value in param_values:
107 self._check_value(param_name, value)
108
109 self._search_space = collections.OrderedDict()
110 for param_name, param_values in sorted(search_space.items()):
111 self._search_space[param_name] = param_values
112
113 self._all_grids = list(itertools.product(*self._search_space.values()))
114 self._param_names = sorted(search_space.keys())
115 self._n_min_trials = len(self._all_grids)
116 self._rng = np.random.RandomState(seed)
117
118 def reseed_rng(self) -> None:
119
120 self._rng.seed()
121
122 def infer_relative_search_space(
123 self, study: Study, trial: FrozenTrial
124 ) -> Dict[str, BaseDistribution]:
125
126 return {}
127
128 def sample_relative(
129 self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
130 ) -> Dict[str, Any]:
131 # Instead of returning param values, GridSampler puts the target grid id as a system attr,
132 # and the values are returned from `sample_independent`. This is because the distribution
133 # object is hard to get at the beginning of trial, while we need the access to the object
134 # to validate the sampled value.
135
136 # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not
137 # assign a new grid_id.
138 if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs:
139 return {}
140
141 target_grids = self._get_unvisited_grid_ids(study)
142
143 if len(target_grids) == 0:
144 # This case may occur with distributed optimization or trial queue. If there is no
145 # target grid, `GridSampler` evaluates a visited, duplicated point with the current
146 # trial. After that, the optimization stops.
147
148 _logger.warning(
149 "`GridSampler` is re-evaluating a configuration because the grid has been "
150 "exhausted. This may happen due to a timing issue during distributed optimization "
151 "or when re-running optimizations on already finished studies."
152 )
153
154 # One of all grids is randomly picked up in this case.
155 target_grids = list(range(len(self._all_grids)))
156
157 # In distributed optimization, multiple workers may simultaneously pick up the same grid.
158 # To make the conflict less frequent, the grid is chosen randomly.
159 grid_id = self._rng.choice(target_grids)
160
161 study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space)
162 study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id)
163
164 return {}
165
166 def sample_independent(
167 self,
168 study: Study,
169 trial: FrozenTrial,
170 param_name: str,
171 param_distribution: BaseDistribution,
172 ) -> Any:
173
174 if "grid_id" not in trial.system_attrs:
175 message = "All parameters must be specified when using GridSampler with enqueue_trial."
176 raise ValueError(message)
177
178 if param_name not in self._search_space:
179 message = "The parameter name, {}, is not found in the given grid.".format(param_name)
180 raise ValueError(message)
181
182 # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.
183 # Current selection logic may evaluate the same parameters multiple times.
184 # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.
185 grid_id = trial.system_attrs["grid_id"]
186 param_value = self._all_grids[grid_id][self._param_names.index(param_name)]
187 contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))
188 if not contains:
189 warnings.warn(
190 f"The value `{param_value}` is out of range of the parameter `{param_name}`. "
191 f"The value will be used but the actual distribution is: `{param_distribution}`."
192 )
193
194 return param_value
195
196 def after_trial(
197 self,
198 study: Study,
199 trial: FrozenTrial,
200 state: TrialState,
201 values: Optional[Sequence[float]],
202 ) -> None:
203 target_grids = self._get_unvisited_grid_ids(study)
204
205 if len(target_grids) == 0:
206 study.stop()
207 elif len(target_grids) == 1:
208 grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
209 if grid_id == target_grids[0]:
210 study.stop()
211
212 @staticmethod
213 def _check_value(param_name: str, param_value: Any) -> None:
214
215 if param_value is None or isinstance(param_value, (str, int, float, bool)):
216 return
217
218 message = (
219 "{} contains a value with the type of {}, which is not supported by "
220 "`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
221 " or `None` for persistent storage.".format(param_name, type(param_value))
222 )
223 warnings.warn(message)
224
225 def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
226
227 # List up unvisited grids based on already finished ones.
228 visited_grids = []
229 running_grids = []
230
231 # We directly query the storage to get trials here instead of `study.get_trials`,
232 # since some pruners such as `HyperbandPruner` use the study transformed
233 # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
234 trials = study._storage.get_all_trials(study._study_id, deepcopy=False)
235
236 for t in trials:
237 if "grid_id" in t.system_attrs and self._same_search_space(
238 t.system_attrs["search_space"]
239 ):
240 if t.state.is_finished():
241 visited_grids.append(t.system_attrs["grid_id"])
242 elif t.state == TrialState.RUNNING:
243 running_grids.append(t.system_attrs["grid_id"])
244
245 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)
246
247 # If evaluations for all grids have been started, return grids that have not yet finished
248 # because all grids should be evaluated before stopping the optimization.
249 if len(unvisited_grids) == 0:
250 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)
251
252 return list(unvisited_grids)
253
254 def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
255
256 if set(search_space.keys()) != set(self._search_space.keys()):
257 return False
258
259 for param_name in search_space.keys():
260 if len(search_space[param_name]) != len(self._search_space[param_name]):
261 return False
262
263 for i, param_value in enumerate(search_space[param_name]):
264 if param_value != self._search_space[param_name][i]:
265 return False
266
267 return True
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py
--- a/optuna/samplers/_grid.py
+++ b/optuna/samplers/_grid.py
@@ -1,5 +1,6 @@
import collections
import itertools
+from numbers import Real
from typing import Any
from typing import Dict
from typing import List
@@ -251,6 +252,12 @@
return list(unvisited_grids)
+ @staticmethod
+ def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:
+ value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))
+ value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))
+ return (value1 == value2) or (value1_is_nan and value2_is_nan)
+
def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
if set(search_space.keys()) != set(self._search_space.keys()):
@@ -261,7 +268,7 @@
return False
for i, param_value in enumerate(search_space[param_name]):
- if param_value != self._search_space[param_name][i]:
+ if not self._grid_value_equal(param_value, self._search_space[param_name][i]):
return False
return True
|
{"golden_diff": "diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py\n--- a/optuna/samplers/_grid.py\n+++ b/optuna/samplers/_grid.py\n@@ -1,5 +1,6 @@\n import collections\n import itertools\n+from numbers import Real\n from typing import Any\n from typing import Dict\n from typing import List\n@@ -251,6 +252,12 @@\n \n return list(unvisited_grids)\n \n+ @staticmethod\n+ def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:\n+ value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))\n+ value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))\n+ return (value1 == value2) or (value1_is_nan and value2_is_nan)\n+\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n \n if set(search_space.keys()) != set(self._search_space.keys()):\n@@ -261,7 +268,7 @@\n return False\n \n for i, param_value in enumerate(search_space[param_name]):\n- if param_value != self._search_space[param_name][i]:\n+ if not self._grid_value_equal(param_value, self._search_space[param_name][i]):\n return False\n \n return True\n", "issue": "`GridSampler` does not stop the optimization when NaN in the `search_space`\n### Expected behavior\n\n`GridSampler` should stop the optimization when all grids are evaluated.\n\n### Environment\n\n- Optuna version: 3.0.0b1.dev\r\n- Python version: 3.8.6\r\n- OS: macOS-10.16-x86_64-i386-64bit\r\n- (Optional) Other libraries and their versions:\r\n\n\n### Error messages, stack traces, or logs\n\n```shell\nSee steps to reproduce.\n```\n\n\n### Steps to reproduce\n\nIn the following code, `optimize` should stop with 2 trials, but it infinitely creates trials.\r\n\r\n```python\r\nimport optuna\r\nsampler = optuna.samplers.GridSampler({\"x\": [0, float(\"nan\")]})\r\nstudy = optuna.create_study(sampler=sampler)\r\nstudy.optimize(lambda _: 0)\r\n```\n\n### Additional context (optional)\n\nThe bug is coded here.\r\n\r\nhttps://github.com/optuna/optuna/blob/c23e17c70bd77489e223f3d4c322d86ec5a6ec66/optuna/samplers/_grid.py#L270\r\n\r\nIf the `param_value` is NaN, this condition is wrong.\r\n\r\n```python\r\nfloat(\"nan\") != float(\"nan\") # True\r\n```\n", "before_files": [{"content": "import collections\nimport itertools\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n seed:\n A seed to fix the order of trials as the grid is randomly shuffled. Please note that\n it is not recommended using this option in distributed optimization settings since\n this option cannot ensure the order of trials and may increase the number of duplicate\n suggestions during distributed optimization.\n \"\"\"\n\n def __init__(\n self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None\n ) -> None:\n\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = collections.OrderedDict()\n for param_name, param_values in sorted(search_space.items()):\n self._search_space[param_name] = param_values\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n self._rng = np.random.RandomState(seed)\n\n def reseed_rng(self) -> None:\n\n self._rng.seed()\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. 
This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return {}\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = self._rng.choice(target_grids)\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. 
Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n for i, param_value in enumerate(search_space[param_name]):\n if param_value != self._search_space[param_name][i]:\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}], "after_files": [{"content": "import collections\nimport itertools\nfrom numbers import Real\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. 
E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n seed:\n A seed to fix the order of trials as the grid is randomly shuffled. Please note that\n it is not recommended using this option in distributed optimization settings since\n this option cannot ensure the order of trials and may increase the number of duplicate\n suggestions during distributed optimization.\n \"\"\"\n\n def __init__(\n self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None\n ) -> None:\n\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = collections.OrderedDict()\n for param_name, param_values in sorted(search_space.items()):\n self._search_space[param_name] = param_values\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n self._rng = np.random.RandomState(seed)\n\n def reseed_rng(self) -> None:\n\n self._rng.seed()\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return {}\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. 
This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = self._rng.choice(target_grids)\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. 
See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n @staticmethod\n def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:\n value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))\n value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))\n return (value1 == value2) or (value1_is_nan and value2_is_nan)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n for i, param_value in enumerate(search_space[param_name]):\n if not self._grid_value_equal(param_value, self._search_space[param_name][i]):\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}]}
| 3,563 | 309 |
gh_patches_debug_37490
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-62640
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Convert digest notifications to use block kit
Convert the `DigestNotificationMessageBuilder` (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/digest.py)) to use block kit. This may be harder to test as I have personally never received one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/integrations/slack/message_builder/notifications/digest.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, Mapping
4
5 from sentry.digests import Digest
6 from sentry.digests.utils import get_groups
7 from sentry.integrations.slack.message_builder import SlackBody
8 from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
9 from sentry.notifications.notifications.digest import DigestNotification
10 from sentry.services.hybrid_cloud.actor import RpcActor
11
12 from .base import SlackNotificationsMessageBuilder
13
14
15 class DigestNotificationMessageBuilder(SlackNotificationsMessageBuilder):
16 def __init__(
17 self,
18 notification: DigestNotification,
19 context: Mapping[str, Any],
20 recipient: RpcActor,
21 ) -> None:
22 super().__init__(notification, context, recipient)
23 self.notification: DigestNotification = notification
24
25 def build(self) -> SlackBody:
26 """
27 It's currently impossible in mypy to have recursive types so we need a
28 hack to get this to return a SlackBody.
29 """
30 digest: Digest = self.context.get("digest", {})
31 return [
32 SlackIssuesMessageBuilder(
33 group=group,
34 event=event,
35 rules=[rule],
36 issue_details=True,
37 notification=self.notification,
38 recipient=self.recipient,
39 ).build()
40 for rule, group, event in get_groups(digest)
41 ]
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/integrations/slack/message_builder/notifications/digest.py b/src/sentry/integrations/slack/message_builder/notifications/digest.py
--- a/src/sentry/integrations/slack/message_builder/notifications/digest.py
+++ b/src/sentry/integrations/slack/message_builder/notifications/digest.py
@@ -2,9 +2,10 @@
from typing import Any, Mapping
+from sentry import features
from sentry.digests import Digest
from sentry.digests.utils import get_groups
-from sentry.integrations.slack.message_builder import SlackBody
+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock
from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
from sentry.notifications.notifications.digest import DigestNotification
from sentry.services.hybrid_cloud.actor import RpcActor
@@ -22,14 +23,28 @@
super().__init__(notification, context, recipient)
self.notification: DigestNotification = notification
- def build(self) -> SlackBody:
+ def build(self) -> SlackAttachment | SlackBlock:
"""
It's currently impossible in mypy to have recursive types so we need a
hack to get this to return a SlackBody.
"""
digest: Digest = self.context.get("digest", {})
- return [
- SlackIssuesMessageBuilder(
+ digest_groups = get_groups(digest)
+ if not features.has("organizations:slack-block-kit", self.notification.organization):
+ return [
+ SlackIssuesMessageBuilder(
+ group=group,
+ event=event,
+ rules=[rule],
+ issue_details=True,
+ notification=self.notification,
+ recipient=self.recipient,
+ ).build()
+ for rule, group, event in digest_groups
+ ]
+ blocks = []
+ for rule, group, event in digest_groups:
+ alert_as_blocks = SlackIssuesMessageBuilder(
group=group,
event=event,
rules=[rule],
@@ -37,5 +52,8 @@
notification=self.notification,
recipient=self.recipient,
).build()
- for rule, group, event in get_groups(digest)
- ]
+ # we iterate through the list of blocks created for each alert in the digest and add
+ # each block to the list of blocks which is used for the entire digest notification
+ for block in alert_as_blocks.get("blocks"):
+ blocks.append(block)
+ return self._build_blocks(*blocks)
|
{"golden_diff": "diff --git a/src/sentry/integrations/slack/message_builder/notifications/digest.py b/src/sentry/integrations/slack/message_builder/notifications/digest.py\n--- a/src/sentry/integrations/slack/message_builder/notifications/digest.py\n+++ b/src/sentry/integrations/slack/message_builder/notifications/digest.py\n@@ -2,9 +2,10 @@\n \n from typing import Any, Mapping\n \n+from sentry import features\n from sentry.digests import Digest\n from sentry.digests.utils import get_groups\n-from sentry.integrations.slack.message_builder import SlackBody\n+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\n from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\n from sentry.notifications.notifications.digest import DigestNotification\n from sentry.services.hybrid_cloud.actor import RpcActor\n@@ -22,14 +23,28 @@\n super().__init__(notification, context, recipient)\n self.notification: DigestNotification = notification\n \n- def build(self) -> SlackBody:\n+ def build(self) -> SlackAttachment | SlackBlock:\n \"\"\"\n It's currently impossible in mypy to have recursive types so we need a\n hack to get this to return a SlackBody.\n \"\"\"\n digest: Digest = self.context.get(\"digest\", {})\n- return [\n- SlackIssuesMessageBuilder(\n+ digest_groups = get_groups(digest)\n+ if not features.has(\"organizations:slack-block-kit\", self.notification.organization):\n+ return [\n+ SlackIssuesMessageBuilder(\n+ group=group,\n+ event=event,\n+ rules=[rule],\n+ issue_details=True,\n+ notification=self.notification,\n+ recipient=self.recipient,\n+ ).build()\n+ for rule, group, event in digest_groups\n+ ]\n+ blocks = []\n+ for rule, group, event in digest_groups:\n+ alert_as_blocks = SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n@@ -37,5 +52,8 @@\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n- for rule, group, event in get_groups(digest)\n- ]\n+ # we iterate through the list of blocks created for each alert in the digest and add\n+ # each block to the list of blocks which is used for the entire digest notification\n+ for block in alert_as_blocks.get(\"blocks\"):\n+ blocks.append(block)\n+ return self._build_blocks(*blocks)\n", "issue": "Convert digest notifications to use block kit\nConvert the `DigestNotificationMessageBuilder` (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/digest.py)) to use block kit. 
This may be harder to test as I have personally never received one.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping\n\nfrom sentry.digests import Digest\nfrom sentry.digests.utils import get_groups\nfrom sentry.integrations.slack.message_builder import SlackBody\nfrom sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\nfrom sentry.notifications.notifications.digest import DigestNotification\nfrom sentry.services.hybrid_cloud.actor import RpcActor\n\nfrom .base import SlackNotificationsMessageBuilder\n\n\nclass DigestNotificationMessageBuilder(SlackNotificationsMessageBuilder):\n def __init__(\n self,\n notification: DigestNotification,\n context: Mapping[str, Any],\n recipient: RpcActor,\n ) -> None:\n super().__init__(notification, context, recipient)\n self.notification: DigestNotification = notification\n\n def build(self) -> SlackBody:\n \"\"\"\n It's currently impossible in mypy to have recursive types so we need a\n hack to get this to return a SlackBody.\n \"\"\"\n digest: Digest = self.context.get(\"digest\", {})\n return [\n SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n for rule, group, event in get_groups(digest)\n ]\n", "path": "src/sentry/integrations/slack/message_builder/notifications/digest.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping\n\nfrom sentry import features\nfrom sentry.digests import Digest\nfrom sentry.digests.utils import get_groups\nfrom sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\nfrom sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\nfrom sentry.notifications.notifications.digest import DigestNotification\nfrom sentry.services.hybrid_cloud.actor import RpcActor\n\nfrom .base import SlackNotificationsMessageBuilder\n\n\nclass DigestNotificationMessageBuilder(SlackNotificationsMessageBuilder):\n def __init__(\n self,\n notification: DigestNotification,\n context: Mapping[str, Any],\n recipient: RpcActor,\n ) -> None:\n super().__init__(notification, context, recipient)\n self.notification: DigestNotification = notification\n\n def build(self) -> SlackAttachment | SlackBlock:\n \"\"\"\n It's currently impossible in mypy to have recursive types so we need a\n hack to get this to return a SlackBody.\n \"\"\"\n digest: Digest = self.context.get(\"digest\", {})\n digest_groups = get_groups(digest)\n if not features.has(\"organizations:slack-block-kit\", self.notification.organization):\n return [\n SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n for rule, group, event in digest_groups\n ]\n blocks = []\n for rule, group, event in digest_groups:\n alert_as_blocks = SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n # we iterate through the list of blocks created for each alert in the digest and add\n # each block to the list of blocks which is used for the entire digest notification\n for block in alert_as_blocks.get(\"blocks\"):\n blocks.append(block)\n return self._build_blocks(*blocks)\n", "path": "src/sentry/integrations/slack/message_builder/notifications/digest.py"}]}
| 688 | 547 |
gh_patches_debug_34272
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-329
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Model eligibility verification form field validation
Our current practice has been to offload input validation to the Eligibility Server, since we're simply a pass-through for rules and data defined there.
However this creates at least a couple potential issues:
* The user must submit their information before they know if it's formatted incorrectly, vs. immediate in-browser feedback
* The fields allow for any size and type of value, a potential DDoS vector
Given we will have more than one `EligibilityVerifier` in the next phase, each with unique form validation needs and UX considerations, now is the time to implement Eligibility verification form field validation.
## Tasks
* [x] Add a `TextField` or similar to the `EligibilityVerifier` model that captures a regex pattern that can be used to validate the `sub` field in the [eligibility verification form](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/forms.py#L15).
* [x] Add a `PositiveIntegerField` or similar to the `EligibilityVerifier` model that captures the maximum allowed length for the `name` field in the form.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/models.py`
Content:
```
1 """
2 The core application: Common model definitions.
3 """
4 import logging
5
6 from django.db import models
7 from django.urls import reverse
8
9 from jwcrypto import jwk
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class PemData(models.Model):
16 """API Certificate or Key in PEM format."""
17
18 id = models.AutoField(primary_key=True)
19 text = models.TextField(help_text="The data in utf-8 encoded PEM text format.")
20 label = models.TextField(help_text="Human description of the PEM data.")
21
22 def __str__(self):
23 return self.label
24
25 @property
26 def jwk(self):
27 """jwcrypto.jwk.JWK instance from this PemData."""
28 pem_bytes = bytes(self.text, "utf-8")
29 return jwk.JWK.from_pem(pem_bytes)
30
31
32 class EligibilityType(models.Model):
33 """A single conditional eligibility type."""
34
35 id = models.AutoField(primary_key=True)
36 name = models.TextField()
37 label = models.TextField()
38 group_id = models.TextField()
39
40 def __str__(self):
41 return self.label
42
43 @staticmethod
44 def get(id):
45 """Get an EligibilityType instance by its id."""
46 logger.debug(f"Get {EligibilityType.__name__} by id: {id}")
47 return EligibilityType.objects.get(pk=id)
48
49 @staticmethod
50 def get_many(ids):
51 """Get a list of EligibilityType instances from a list of ids."""
52 logger.debug(f"Get {EligibilityType.__name__} list by ids: {ids}")
53 return EligibilityType.objects.filter(id__in=ids)
54
55
56 class EligibilityVerifier(models.Model):
57 """An entity that verifies eligibility."""
58
59 # fmt: off
60 id = models.AutoField(primary_key=True)
61 name = models.TextField()
62 api_url = models.TextField()
63 api_auth_header = models.TextField()
64 api_auth_key = models.TextField()
65 eligibility_types = models.ManyToManyField(EligibilityType)
66 public_key = models.ForeignKey(PemData, help_text="The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.", related_name="+", on_delete=models.PROTECT) # noqa: 503
67 jwe_cek_enc = models.TextField(help_text="The JWE-compatible Content Encryption Key (CEK) key-length and mode")
68 jwe_encryption_alg = models.TextField(help_text="The JWE-compatible encryption algorithm")
69 jws_signing_alg = models.TextField(help_text="The JWS-compatible signing algorithm")
70 # fmt: on
71
72 def __str__(self):
73 return self.name
74
75 @property
76 def public_jwk(self):
77 """jwcrypto.jwk.JWK instance of this Verifier's public key"""
78 return self.public_key.jwk
79
80
81 class PaymentProcessor(models.Model):
82 """An entity that processes payments for transit agencies."""
83
84 # fmt: off
85 id = models.AutoField(primary_key=True)
86 name = models.TextField()
87 api_base_url = models.TextField()
88 api_access_token_endpoint = models.TextField()
89 api_access_token_request_key = models.TextField()
90 api_access_token_request_val = models.TextField()
91 card_tokenize_url = models.TextField()
92 card_tokenize_func = models.TextField()
93 card_tokenize_env = models.TextField()
94 client_cert = models.ForeignKey(PemData, help_text="The certificate used for client certificate authentication to the API.", related_name="+", on_delete=models.PROTECT) # noqa: 503
95 client_cert_private_key = models.ForeignKey(PemData, help_text="The private key, used to sign the certificate.", related_name="+", on_delete=models.PROTECT) # noqa: 503
96 client_cert_root_ca = models.ForeignKey(PemData, help_text="The root CA bundle, used to verify the server.", related_name="+", on_delete=models.PROTECT) # noqa: 503
97 customer_endpoint = models.TextField()
98 customers_endpoint = models.TextField()
99 group_endpoint = models.TextField()
100 # fmt: on
101
102 def __str__(self):
103 return self.name
104
105
106 class TransitAgency(models.Model):
107 """An agency offering transit service."""
108
109 # fmt: off
110 id = models.AutoField(primary_key=True)
111 slug = models.TextField()
112 short_name = models.TextField()
113 long_name = models.TextField()
114 agency_id = models.TextField()
115 merchant_id = models.TextField()
116 info_url = models.URLField()
117 phone = models.TextField()
118 active = models.BooleanField(default=False)
119 eligibility_types = models.ManyToManyField(EligibilityType)
120 eligibility_verifiers = models.ManyToManyField(EligibilityVerifier)
121 payment_processor = models.ForeignKey(PaymentProcessor, on_delete=models.PROTECT)
122 private_key = models.ForeignKey(PemData, help_text="The Agency's private key, used to sign tokens created on behalf of this Agency.", related_name="+", on_delete=models.PROTECT) # noqa: 503
123 jws_signing_alg = models.TextField(help_text="The JWS-compatible signing algorithm.")
124 # fmt: on
125
126 def __str__(self):
127 return self.long_name
128
129 def get_type_id(self, name):
130 """Get the id of the EligibilityType identified by the given name for this agency."""
131 eligibility = self.eligibility_types.all().filter(name=name)
132 if eligibility.count() == 1:
133 return eligibility[0].id
134 else:
135 raise Exception("name does not correspond to a single eligibility type for agency")
136
137 def supports_type(self, eligibility_type):
138 """True if the eligibility_type is one of this agency's types. False otherwise."""
139 return isinstance(eligibility_type, EligibilityType) and eligibility_type in self.eligibility_types.all()
140
141 def types_to_verify(self, eligibility_verifier):
142 """List of eligibility types to verify for this agency."""
143 # compute set intersection of agency and verifier type ids
144 agency_types = set(self.eligibility_types.values_list("id", flat=True))
145 verifier_types = set(eligibility_verifier.eligibility_types.values_list("id", flat=True))
146 supported_types = list(agency_types & verifier_types)
147 return EligibilityType.get_many(supported_types)
148
149 @property
150 def index_url(self):
151 """Url to the TransitAgency's landing page."""
152 return reverse("core:agency_index", args=[self.slug])
153
154 @property
155 def private_jwk(self):
156 """jwcrypto.jwk.JWK instance of this Agency's private key"""
157 return self.private_key.jwk
158
159 @staticmethod
160 def by_id(id):
161 """Get a TransitAgency instance by its ID."""
162 logger.debug(f"Get {TransitAgency.__name__} by id: {id}")
163 return TransitAgency.objects.get(id=id)
164
165 @staticmethod
166 def by_slug(slug):
167 """Get a TransitAgency instance by its slug."""
168 logger.debug(f"Get {TransitAgency.__name__} by slug: {slug}")
169 return TransitAgency.objects.filter(slug=slug).first()
170
171 @staticmethod
172 def all_active():
173 """Get all TransitAgency instances marked active."""
174 logger.debug(f"Get all active {TransitAgency.__name__}")
175 return TransitAgency.objects.filter(active=True)
176
```
Path: `benefits/core/migrations/0001_initial.py`
Content:
```
1 from django.db import migrations, models
2
3
4 class Migration(migrations.Migration):
5
6 initial = True
7
8 dependencies = []
9
10 operations = [
11 migrations.CreateModel(
12 name="EligibilityType",
13 fields=[
14 ("id", models.AutoField(primary_key=True, serialize=False, verbose_name="ID")),
15 ("name", models.TextField()),
16 ("label", models.TextField()),
17 ("group_id", models.TextField()),
18 ],
19 ),
20 migrations.CreateModel(
21 name="PemData",
22 fields=[
23 ("id", models.AutoField(primary_key=True, serialize=False)),
24 ("text", models.TextField(help_text="The data in utf-8 encoded PEM text format.")),
25 ("label", models.TextField(help_text="Human description of the PEM data.")),
26 ],
27 ),
28 migrations.CreateModel(
29 name="EligibilityVerifier",
30 fields=[
31 ("id", models.AutoField(primary_key=True, serialize=False, verbose_name="ID")),
32 ("name", models.TextField()),
33 ("api_url", models.TextField()),
34 ("api_auth_header", models.TextField()),
35 ("api_auth_key", models.TextField()),
36 # fmt: off
37 ("public_key", models.ForeignKey(help_text="The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
38 ("jwe_cek_enc", models.TextField(help_text="The JWE-compatible Content Encryption Key (CEK) key-length and mode")), # noqa: E501
39 ("jwe_encryption_alg", models.TextField(help_text="The JWE-compatible encryption algorithm")),
40 # fmt: on
41 ("jws_signing_alg", models.TextField(help_text="The JWS-compatible signing algorithm")),
42 ("eligibility_types", models.ManyToManyField(to="core.EligibilityType")),
43 ],
44 ),
45 migrations.CreateModel(
46 name="PaymentProcessor",
47 fields=[
48 ("id", models.AutoField(primary_key=True, serialize=False, verbose_name="ID")),
49 ("name", models.TextField()),
50 ("api_base_url", models.TextField()),
51 ("api_access_token_endpoint", models.TextField()),
52 ("api_access_token_request_key", models.TextField()),
53 ("api_access_token_request_val", models.TextField()),
54 ("card_tokenize_url", models.TextField()),
55 ("card_tokenize_func", models.TextField()),
56 ("card_tokenize_env", models.TextField()),
57 # fmt: off
58 ("client_cert", models.ForeignKey(help_text="The certificate used for client certificate authentication to the API.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
59 ("client_cert_private_key", models.ForeignKey(help_text="The private key used to sign the certificate.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
60 ("client_cert_root_ca", models.ForeignKey(help_text="The root CA bundle used to verify the server.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
61 ("customer_endpoint", models.TextField()),
62 # fmt: on
63 ("customers_endpoint", models.TextField()),
64 ("group_endpoint", models.TextField()),
65 ],
66 ),
67 migrations.CreateModel(
68 name="TransitAgency",
69 fields=[
70 ("id", models.AutoField(primary_key=True, serialize=False, verbose_name="ID")),
71 ("slug", models.TextField()),
72 ("short_name", models.TextField()),
73 ("long_name", models.TextField()),
74 ("agency_id", models.TextField()),
75 ("merchant_id", models.TextField()),
76 ("info_url", models.URLField()),
77 ("phone", models.TextField()),
78 ("active", models.BooleanField(default=False)),
79 # fmt: off
80 ("private_key", models.ForeignKey(help_text="The Agency's private key, used to sign tokens created on behalf of this Agency.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
81 ("jws_signing_alg", models.TextField(help_text="The JWS-compatible signing algorithm.")),
82 ("payment_processor", models.ForeignKey(on_delete=models.deletion.PROTECT, to="core.paymentprocessor")),
83 ("eligibility_types", models.ManyToManyField(to="core.EligibilityType")),
84 ("eligibility_verifiers", models.ManyToManyField(to="core.eligibilityverifier")),
85 # fmt: on
86 ],
87 ),
88 ]
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/migrations/0001_initial.py b/benefits/core/migrations/0001_initial.py
--- a/benefits/core/migrations/0001_initial.py
+++ b/benefits/core/migrations/0001_initial.py
@@ -34,6 +34,8 @@
("api_auth_header", models.TextField()),
("api_auth_key", models.TextField()),
# fmt: off
+ ("sub_regex", models.TextField(help_text="A regular expression used to validate the 'sub' API field before sending to this verifier", null=True)), # noqa: E501
+ ("name_max_length", models.PositiveSmallIntegerField(help_text="The maximum length accepted for the 'name' API field before sending to this verifier", null=True)), # noqa: E501
("public_key", models.ForeignKey(help_text="The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.", on_delete=models.deletion.PROTECT, related_name="+", to="core.PemData")), # noqa: E501
("jwe_cek_enc", models.TextField(help_text="The JWE-compatible Content Encryption Key (CEK) key-length and mode")), # noqa: E501
("jwe_encryption_alg", models.TextField(help_text="The JWE-compatible encryption algorithm")),
diff --git a/benefits/core/models.py b/benefits/core/models.py
--- a/benefits/core/models.py
+++ b/benefits/core/models.py
@@ -67,6 +67,8 @@
jwe_cek_enc = models.TextField(help_text="The JWE-compatible Content Encryption Key (CEK) key-length and mode")
jwe_encryption_alg = models.TextField(help_text="The JWE-compatible encryption algorithm")
jws_signing_alg = models.TextField(help_text="The JWS-compatible signing algorithm")
+ sub_regex = models.TextField(null=True, help_text="A regular expression used to validate the 'sub' API field before sending to this verifier") # noqa: 503
+ name_max_length = models.PositiveSmallIntegerField(null=True, help_text="The maximum length accepted for the 'name' API field before sending to this verifier") # noqa: 503
# fmt: on
def __str__(self):
|
{"golden_diff": "diff --git a/benefits/core/migrations/0001_initial.py b/benefits/core/migrations/0001_initial.py\n--- a/benefits/core/migrations/0001_initial.py\n+++ b/benefits/core/migrations/0001_initial.py\n@@ -34,6 +34,8 @@\n (\"api_auth_header\", models.TextField()),\n (\"api_auth_key\", models.TextField()),\n # fmt: off\n+ (\"sub_regex\", models.TextField(help_text=\"A regular expression used to validate the 'sub' API field before sending to this verifier\", null=True)), # noqa: E501\n+ (\"name_max_length\", models.PositiveSmallIntegerField(help_text=\"The maximum length accepted for the 'name' API field before sending to this verifier\", null=True)), # noqa: E501\n (\"public_key\", models.ForeignKey(help_text=\"The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"jwe_cek_enc\", models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")), # noqa: E501\n (\"jwe_encryption_alg\", models.TextField(help_text=\"The JWE-compatible encryption algorithm\")),\ndiff --git a/benefits/core/models.py b/benefits/core/models.py\n--- a/benefits/core/models.py\n+++ b/benefits/core/models.py\n@@ -67,6 +67,8 @@\n jwe_cek_enc = models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")\n jwe_encryption_alg = models.TextField(help_text=\"The JWE-compatible encryption algorithm\")\n jws_signing_alg = models.TextField(help_text=\"The JWS-compatible signing algorithm\")\n+ sub_regex = models.TextField(null=True, help_text=\"A regular expression used to validate the 'sub' API field before sending to this verifier\") # noqa: 503\n+ name_max_length = models.PositiveSmallIntegerField(null=True, help_text=\"The maximum length accepted for the 'name' API field before sending to this verifier\") # noqa: 503\n # fmt: on\n \n def __str__(self):\n", "issue": "Model eligibility verification form field validation\nOur current practice has been to offload input validation to the Eligibility Server, since we're simply a pass-through for rules and data defined there.\r\n\r\nHowever this creates at least a couple potential issues:\r\n\r\n* The user must submit their information before they know if it's formatted incorrectly, vs. 
immediate in-browser feedback\r\n* The fields allow for any size and type of value, a potential DDoS vector\r\n\r\nGiven we will have more than one `EligibilityVerifier` in the next phase, each with unique form validation needs and UX considerations, now is the time to implement Eligibility verification form field validation.\r\n\r\n## Tasks\r\n\r\n* [x] Add a `TextField` or similar to the `EligibilityVerifier` model that captures a regex pattern that can be used to validate the `sub` field in the [eligibility verification form](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/forms.py#L15).\r\n* [x] Add a `PositiveIntegerField` or similar to the `EligibilityVerifier` model that captures the maximum allowed length for the `name` field in the form.\n", "before_files": [{"content": "\"\"\"\nThe core application: Common model definitions.\n\"\"\"\nimport logging\n\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom jwcrypto import jwk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PemData(models.Model):\n \"\"\"API Certificate or Key in PEM format.\"\"\"\n\n id = models.AutoField(primary_key=True)\n text = models.TextField(help_text=\"The data in utf-8 encoded PEM text format.\")\n label = models.TextField(help_text=\"Human description of the PEM data.\")\n\n def __str__(self):\n return self.label\n\n @property\n def jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance from this PemData.\"\"\"\n pem_bytes = bytes(self.text, \"utf-8\")\n return jwk.JWK.from_pem(pem_bytes)\n\n\nclass EligibilityType(models.Model):\n \"\"\"A single conditional eligibility type.\"\"\"\n\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n label = models.TextField()\n group_id = models.TextField()\n\n def __str__(self):\n return self.label\n\n @staticmethod\n def get(id):\n \"\"\"Get an EligibilityType instance by its id.\"\"\"\n logger.debug(f\"Get {EligibilityType.__name__} by id: {id}\")\n return EligibilityType.objects.get(pk=id)\n\n @staticmethod\n def get_many(ids):\n \"\"\"Get a list of EligibilityType instances from a list of ids.\"\"\"\n logger.debug(f\"Get {EligibilityType.__name__} list by ids: {ids}\")\n return EligibilityType.objects.filter(id__in=ids)\n\n\nclass EligibilityVerifier(models.Model):\n \"\"\"An entity that verifies eligibility.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n api_url = models.TextField()\n api_auth_header = models.TextField()\n api_auth_key = models.TextField()\n eligibility_types = models.ManyToManyField(EligibilityType)\n public_key = models.ForeignKey(PemData, help_text=\"The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n jwe_cek_enc = models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")\n jwe_encryption_alg = models.TextField(help_text=\"The JWE-compatible encryption algorithm\")\n jws_signing_alg = models.TextField(help_text=\"The JWS-compatible signing algorithm\")\n # fmt: on\n\n def __str__(self):\n return self.name\n\n @property\n def public_jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance of this Verifier's public key\"\"\"\n return self.public_key.jwk\n\n\nclass PaymentProcessor(models.Model):\n \"\"\"An entity that processes payments for transit agencies.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n api_base_url = models.TextField()\n 
api_access_token_endpoint = models.TextField()\n api_access_token_request_key = models.TextField()\n api_access_token_request_val = models.TextField()\n card_tokenize_url = models.TextField()\n card_tokenize_func = models.TextField()\n card_tokenize_env = models.TextField()\n client_cert = models.ForeignKey(PemData, help_text=\"The certificate used for client certificate authentication to the API.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n client_cert_private_key = models.ForeignKey(PemData, help_text=\"The private key, used to sign the certificate.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n client_cert_root_ca = models.ForeignKey(PemData, help_text=\"The root CA bundle, used to verify the server.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n customer_endpoint = models.TextField()\n customers_endpoint = models.TextField()\n group_endpoint = models.TextField()\n # fmt: on\n\n def __str__(self):\n return self.name\n\n\nclass TransitAgency(models.Model):\n \"\"\"An agency offering transit service.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n slug = models.TextField()\n short_name = models.TextField()\n long_name = models.TextField()\n agency_id = models.TextField()\n merchant_id = models.TextField()\n info_url = models.URLField()\n phone = models.TextField()\n active = models.BooleanField(default=False)\n eligibility_types = models.ManyToManyField(EligibilityType)\n eligibility_verifiers = models.ManyToManyField(EligibilityVerifier)\n payment_processor = models.ForeignKey(PaymentProcessor, on_delete=models.PROTECT)\n private_key = models.ForeignKey(PemData, help_text=\"The Agency's private key, used to sign tokens created on behalf of this Agency.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n jws_signing_alg = models.TextField(help_text=\"The JWS-compatible signing algorithm.\")\n # fmt: on\n\n def __str__(self):\n return self.long_name\n\n def get_type_id(self, name):\n \"\"\"Get the id of the EligibilityType identified by the given name for this agency.\"\"\"\n eligibility = self.eligibility_types.all().filter(name=name)\n if eligibility.count() == 1:\n return eligibility[0].id\n else:\n raise Exception(\"name does not correspond to a single eligibility type for agency\")\n\n def supports_type(self, eligibility_type):\n \"\"\"True if the eligibility_type is one of this agency's types. 
False otherwise.\"\"\"\n return isinstance(eligibility_type, EligibilityType) and eligibility_type in self.eligibility_types.all()\n\n def types_to_verify(self, eligibility_verifier):\n \"\"\"List of eligibility types to verify for this agency.\"\"\"\n # compute set intersection of agency and verifier type ids\n agency_types = set(self.eligibility_types.values_list(\"id\", flat=True))\n verifier_types = set(eligibility_verifier.eligibility_types.values_list(\"id\", flat=True))\n supported_types = list(agency_types & verifier_types)\n return EligibilityType.get_many(supported_types)\n\n @property\n def index_url(self):\n \"\"\"Url to the TransitAgency's landing page.\"\"\"\n return reverse(\"core:agency_index\", args=[self.slug])\n\n @property\n def private_jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance of this Agency's private key\"\"\"\n return self.private_key.jwk\n\n @staticmethod\n def by_id(id):\n \"\"\"Get a TransitAgency instance by its ID.\"\"\"\n logger.debug(f\"Get {TransitAgency.__name__} by id: {id}\")\n return TransitAgency.objects.get(id=id)\n\n @staticmethod\n def by_slug(slug):\n \"\"\"Get a TransitAgency instance by its slug.\"\"\"\n logger.debug(f\"Get {TransitAgency.__name__} by slug: {slug}\")\n return TransitAgency.objects.filter(slug=slug).first()\n\n @staticmethod\n def all_active():\n \"\"\"Get all TransitAgency instances marked active.\"\"\"\n logger.debug(f\"Get all active {TransitAgency.__name__}\")\n return TransitAgency.objects.filter(active=True)\n", "path": "benefits/core/models.py"}, {"content": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = []\n\n operations = [\n migrations.CreateModel(\n name=\"EligibilityType\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"label\", models.TextField()),\n (\"group_id\", models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name=\"PemData\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False)),\n (\"text\", models.TextField(help_text=\"The data in utf-8 encoded PEM text format.\")),\n (\"label\", models.TextField(help_text=\"Human description of the PEM data.\")),\n ],\n ),\n migrations.CreateModel(\n name=\"EligibilityVerifier\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"api_url\", models.TextField()),\n (\"api_auth_header\", models.TextField()),\n (\"api_auth_key\", models.TextField()),\n # fmt: off\n (\"public_key\", models.ForeignKey(help_text=\"The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"jwe_cek_enc\", models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")), # noqa: E501\n (\"jwe_encryption_alg\", models.TextField(help_text=\"The JWE-compatible encryption algorithm\")),\n # fmt: on\n (\"jws_signing_alg\", models.TextField(help_text=\"The JWS-compatible signing algorithm\")),\n (\"eligibility_types\", models.ManyToManyField(to=\"core.EligibilityType\")),\n ],\n ),\n migrations.CreateModel(\n name=\"PaymentProcessor\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"api_base_url\", models.TextField()),\n 
(\"api_access_token_endpoint\", models.TextField()),\n (\"api_access_token_request_key\", models.TextField()),\n (\"api_access_token_request_val\", models.TextField()),\n (\"card_tokenize_url\", models.TextField()),\n (\"card_tokenize_func\", models.TextField()),\n (\"card_tokenize_env\", models.TextField()),\n # fmt: off\n (\"client_cert\", models.ForeignKey(help_text=\"The certificate used for client certificate authentication to the API.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"client_cert_private_key\", models.ForeignKey(help_text=\"The private key used to sign the certificate.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"client_cert_root_ca\", models.ForeignKey(help_text=\"The root CA bundle used to verify the server.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"customer_endpoint\", models.TextField()),\n # fmt: on\n (\"customers_endpoint\", models.TextField()),\n (\"group_endpoint\", models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name=\"TransitAgency\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"slug\", models.TextField()),\n (\"short_name\", models.TextField()),\n (\"long_name\", models.TextField()),\n (\"agency_id\", models.TextField()),\n (\"merchant_id\", models.TextField()),\n (\"info_url\", models.URLField()),\n (\"phone\", models.TextField()),\n (\"active\", models.BooleanField(default=False)),\n # fmt: off\n (\"private_key\", models.ForeignKey(help_text=\"The Agency's private key, used to sign tokens created on behalf of this Agency.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"jws_signing_alg\", models.TextField(help_text=\"The JWS-compatible signing algorithm.\")),\n (\"payment_processor\", models.ForeignKey(on_delete=models.deletion.PROTECT, to=\"core.paymentprocessor\")),\n (\"eligibility_types\", models.ManyToManyField(to=\"core.EligibilityType\")),\n (\"eligibility_verifiers\", models.ManyToManyField(to=\"core.eligibilityverifier\")),\n # fmt: on\n ],\n ),\n ]\n", "path": "benefits/core/migrations/0001_initial.py"}], "after_files": [{"content": "\"\"\"\nThe core application: Common model definitions.\n\"\"\"\nimport logging\n\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom jwcrypto import jwk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PemData(models.Model):\n \"\"\"API Certificate or Key in PEM format.\"\"\"\n\n id = models.AutoField(primary_key=True)\n text = models.TextField(help_text=\"The data in utf-8 encoded PEM text format.\")\n label = models.TextField(help_text=\"Human description of the PEM data.\")\n\n def __str__(self):\n return self.label\n\n @property\n def jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance from this PemData.\"\"\"\n pem_bytes = bytes(self.text, \"utf-8\")\n return jwk.JWK.from_pem(pem_bytes)\n\n\nclass EligibilityType(models.Model):\n \"\"\"A single conditional eligibility type.\"\"\"\n\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n label = models.TextField()\n group_id = models.TextField()\n\n def __str__(self):\n return self.label\n\n @staticmethod\n def get(id):\n \"\"\"Get an EligibilityType instance by its id.\"\"\"\n logger.debug(f\"Get {EligibilityType.__name__} by id: {id}\")\n return EligibilityType.objects.get(pk=id)\n\n @staticmethod\n def get_many(ids):\n \"\"\"Get a list of EligibilityType 
instances from a list of ids.\"\"\"\n logger.debug(f\"Get {EligibilityType.__name__} list by ids: {ids}\")\n return EligibilityType.objects.filter(id__in=ids)\n\n\nclass EligibilityVerifier(models.Model):\n \"\"\"An entity that verifies eligibility.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n api_url = models.TextField()\n api_auth_header = models.TextField()\n api_auth_key = models.TextField()\n eligibility_types = models.ManyToManyField(EligibilityType)\n public_key = models.ForeignKey(PemData, help_text=\"The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n jwe_cek_enc = models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")\n jwe_encryption_alg = models.TextField(help_text=\"The JWE-compatible encryption algorithm\")\n jws_signing_alg = models.TextField(help_text=\"The JWS-compatible signing algorithm\")\n sub_regex = models.TextField(null=True, help_text=\"A regular expression used to validate the 'sub' API field before sending to this verifier\") # noqa: 503\n name_max_length = models.PositiveSmallIntegerField(null=True, help_text=\"The maximum length accepted for the 'name' API field before sending to this verifier\") # noqa: 503\n # fmt: on\n\n def __str__(self):\n return self.name\n\n @property\n def public_jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance of this Verifier's public key\"\"\"\n return self.public_key.jwk\n\n\nclass PaymentProcessor(models.Model):\n \"\"\"An entity that processes payments for transit agencies.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n name = models.TextField()\n api_base_url = models.TextField()\n api_access_token_endpoint = models.TextField()\n api_access_token_request_key = models.TextField()\n api_access_token_request_val = models.TextField()\n card_tokenize_url = models.TextField()\n card_tokenize_func = models.TextField()\n card_tokenize_env = models.TextField()\n client_cert = models.ForeignKey(PemData, help_text=\"The certificate used for client certificate authentication to the API.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n client_cert_private_key = models.ForeignKey(PemData, help_text=\"The private key, used to sign the certificate.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n client_cert_root_ca = models.ForeignKey(PemData, help_text=\"The root CA bundle, used to verify the server.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n customer_endpoint = models.TextField()\n customers_endpoint = models.TextField()\n group_endpoint = models.TextField()\n # fmt: on\n\n def __str__(self):\n return self.name\n\n\nclass TransitAgency(models.Model):\n \"\"\"An agency offering transit service.\"\"\"\n\n # fmt: off\n id = models.AutoField(primary_key=True)\n slug = models.TextField()\n short_name = models.TextField()\n long_name = models.TextField()\n agency_id = models.TextField()\n merchant_id = models.TextField()\n info_url = models.URLField()\n phone = models.TextField()\n active = models.BooleanField(default=False)\n eligibility_types = models.ManyToManyField(EligibilityType)\n eligibility_verifiers = models.ManyToManyField(EligibilityVerifier)\n payment_processor = models.ForeignKey(PaymentProcessor, on_delete=models.PROTECT)\n private_key = models.ForeignKey(PemData, help_text=\"The Agency's private key, used to sign tokens created on behalf of 
this Agency.\", related_name=\"+\", on_delete=models.PROTECT) # noqa: 503\n jws_signing_alg = models.TextField(help_text=\"The JWS-compatible signing algorithm.\")\n # fmt: on\n\n def __str__(self):\n return self.long_name\n\n def get_type_id(self, name):\n \"\"\"Get the id of the EligibilityType identified by the given name for this agency.\"\"\"\n eligibility = self.eligibility_types.all().filter(name=name)\n if eligibility.count() == 1:\n return eligibility[0].id\n else:\n raise Exception(\"name does not correspond to a single eligibility type for agency\")\n\n def supports_type(self, eligibility_type):\n \"\"\"True if the eligibility_type is one of this agency's types. False otherwise.\"\"\"\n return isinstance(eligibility_type, EligibilityType) and eligibility_type in self.eligibility_types.all()\n\n def types_to_verify(self, eligibility_verifier):\n \"\"\"List of eligibility types to verify for this agency.\"\"\"\n # compute set intersection of agency and verifier type ids\n agency_types = set(self.eligibility_types.values_list(\"id\", flat=True))\n verifier_types = set(eligibility_verifier.eligibility_types.values_list(\"id\", flat=True))\n supported_types = list(agency_types & verifier_types)\n return EligibilityType.get_many(supported_types)\n\n @property\n def index_url(self):\n \"\"\"Url to the TransitAgency's landing page.\"\"\"\n return reverse(\"core:agency_index\", args=[self.slug])\n\n @property\n def private_jwk(self):\n \"\"\"jwcrypto.jwk.JWK instance of this Agency's private key\"\"\"\n return self.private_key.jwk\n\n @staticmethod\n def by_id(id):\n \"\"\"Get a TransitAgency instance by its ID.\"\"\"\n logger.debug(f\"Get {TransitAgency.__name__} by id: {id}\")\n return TransitAgency.objects.get(id=id)\n\n @staticmethod\n def by_slug(slug):\n \"\"\"Get a TransitAgency instance by its slug.\"\"\"\n logger.debug(f\"Get {TransitAgency.__name__} by slug: {slug}\")\n return TransitAgency.objects.filter(slug=slug).first()\n\n @staticmethod\n def all_active():\n \"\"\"Get all TransitAgency instances marked active.\"\"\"\n logger.debug(f\"Get all active {TransitAgency.__name__}\")\n return TransitAgency.objects.filter(active=True)\n", "path": "benefits/core/models.py"}, {"content": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = []\n\n operations = [\n migrations.CreateModel(\n name=\"EligibilityType\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"label\", models.TextField()),\n (\"group_id\", models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name=\"PemData\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False)),\n (\"text\", models.TextField(help_text=\"The data in utf-8 encoded PEM text format.\")),\n (\"label\", models.TextField(help_text=\"Human description of the PEM data.\")),\n ],\n ),\n migrations.CreateModel(\n name=\"EligibilityVerifier\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"api_url\", models.TextField()),\n (\"api_auth_header\", models.TextField()),\n (\"api_auth_key\", models.TextField()),\n # fmt: off\n (\"sub_regex\", models.TextField(help_text=\"A regular expression used to validate the 'sub' API field before sending to this verifier\", null=True)), # noqa: E501\n (\"name_max_length\", models.PositiveSmallIntegerField(help_text=\"The maximum length accepted for the 
'name' API field before sending to this verifier\", null=True)), # noqa: E501\n (\"public_key\", models.ForeignKey(help_text=\"The Verifier's public key, used to encrypt requests targeted at this Verifier and to verify signed responses from this verifier.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"jwe_cek_enc\", models.TextField(help_text=\"The JWE-compatible Content Encryption Key (CEK) key-length and mode\")), # noqa: E501\n (\"jwe_encryption_alg\", models.TextField(help_text=\"The JWE-compatible encryption algorithm\")),\n # fmt: on\n (\"jws_signing_alg\", models.TextField(help_text=\"The JWS-compatible signing algorithm\")),\n (\"eligibility_types\", models.ManyToManyField(to=\"core.EligibilityType\")),\n ],\n ),\n migrations.CreateModel(\n name=\"PaymentProcessor\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"name\", models.TextField()),\n (\"api_base_url\", models.TextField()),\n (\"api_access_token_endpoint\", models.TextField()),\n (\"api_access_token_request_key\", models.TextField()),\n (\"api_access_token_request_val\", models.TextField()),\n (\"card_tokenize_url\", models.TextField()),\n (\"card_tokenize_func\", models.TextField()),\n (\"card_tokenize_env\", models.TextField()),\n # fmt: off\n (\"client_cert\", models.ForeignKey(help_text=\"The certificate used for client certificate authentication to the API.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"client_cert_private_key\", models.ForeignKey(help_text=\"The private key used to sign the certificate.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"client_cert_root_ca\", models.ForeignKey(help_text=\"The root CA bundle used to verify the server.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"customer_endpoint\", models.TextField()),\n # fmt: on\n (\"customers_endpoint\", models.TextField()),\n (\"group_endpoint\", models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name=\"TransitAgency\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"slug\", models.TextField()),\n (\"short_name\", models.TextField()),\n (\"long_name\", models.TextField()),\n (\"agency_id\", models.TextField()),\n (\"merchant_id\", models.TextField()),\n (\"info_url\", models.URLField()),\n (\"phone\", models.TextField()),\n (\"active\", models.BooleanField(default=False)),\n # fmt: off\n (\"private_key\", models.ForeignKey(help_text=\"The Agency's private key, used to sign tokens created on behalf of this Agency.\", on_delete=models.deletion.PROTECT, related_name=\"+\", to=\"core.PemData\")), # noqa: E501\n (\"jws_signing_alg\", models.TextField(help_text=\"The JWS-compatible signing algorithm.\")),\n (\"payment_processor\", models.ForeignKey(on_delete=models.deletion.PROTECT, to=\"core.paymentprocessor\")),\n (\"eligibility_types\", models.ManyToManyField(to=\"core.EligibilityType\")),\n (\"eligibility_verifiers\", models.ManyToManyField(to=\"core.eligibilityverifier\")),\n # fmt: on\n ],\n ),\n ]\n", "path": "benefits/core/migrations/0001_initial.py"}]}
| 3,598 | 514 |
gh_patches_debug_14648
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1003
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET on /v1/admin returns 404
The expected address for the built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.
Related to #112 and #858.
```
> http get localhost:8888/v1/admin/
HTTP/1.1 200 OK
(...)
> http get localhost:8888/v1/admin
HTTP/1.1 404 Not Found
(...)
```
GET on /v1/admin returns 404
The expected address for the built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.
Related to #112 and #858.
```
> http get localhost:8888/v1/admin/
HTTP/1.1 200 OK
(...)
> http get localhost:8888/v1/admin
HTTP/1.1 404 Not Found
(...)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/admin/__init__.py`
Content:
```
1 from pyramid.static import static_view
2
3
4 def includeme(config):
5 # Process settings to remove storage wording.
6
7 # Expose capability.
8 config.add_api_capability(
9 "admin",
10 version="1.6.0",
11 description="Serves the admin console.",
12 url="https://github.com/Kinto/kinto-admin/",
13 )
14
15 build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)
16 config.add_route('catchall_static', '/admin/*subpath')
17 config.add_view(build_dir, route_name="catchall_static")
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py
--- a/kinto/plugins/admin/__init__.py
+++ b/kinto/plugins/admin/__init__.py
@@ -1,4 +1,5 @@
from pyramid.static import static_view
+from pyramid.httpexceptions import HTTPTemporaryRedirect
def includeme(config):
@@ -15,3 +16,10 @@
build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)
config.add_route('catchall_static', '/admin/*subpath')
config.add_view(build_dir, route_name="catchall_static")
+
+ # Setup redirect without trailing slash.
+ def admin_redirect_view(request):
+ raise HTTPTemporaryRedirect(request.path + '/')
+
+ config.add_route('admin_redirect', '/admin')
+ config.add_view(admin_redirect_view, route_name="admin_redirect")
|
{"golden_diff": "diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py\n--- a/kinto/plugins/admin/__init__.py\n+++ b/kinto/plugins/admin/__init__.py\n@@ -1,4 +1,5 @@\n from pyramid.static import static_view\n+from pyramid.httpexceptions import HTTPTemporaryRedirect\n \n \n def includeme(config):\n@@ -15,3 +16,10 @@\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n+\n+ # Setup redirect without trailing slash.\n+ def admin_redirect_view(request):\n+ raise HTTPTemporaryRedirect(request.path + '/')\n+\n+ config.add_route('admin_redirect', '/admin')\n+ config.add_view(admin_redirect_view, route_name=\"admin_redirect\")\n", "issue": "GET on /v1/admin returns 404\nThe expected address for built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.\r\n\r\nRelated to #112 and #858.\r\n\r\n```\r\n> http get localhost:8888/v1/admin/\r\nHTTP/1.1 200 OK\r\n(...)\r\n\r\n> http get localhost:8888/v1/admin\r\nHTTP/1.1 404 Not Found\r\n(...)\r\n```\r\n\r\n\nGET on /v1/admin returns 404\nThe expected address for built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.\r\n\r\nRelated to #112 and #858.\r\n\r\n```\r\n> http get localhost:8888/v1/admin/\r\nHTTP/1.1 200 OK\r\n(...)\r\n\r\n> http get localhost:8888/v1/admin\r\nHTTP/1.1 404 Not Found\r\n(...)\r\n```\r\n\r\n\n", "before_files": [{"content": "from pyramid.static import static_view\n\n\ndef includeme(config):\n # Process settings to remove storage wording.\n\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n version=\"1.6.0\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n", "path": "kinto/plugins/admin/__init__.py"}], "after_files": [{"content": "from pyramid.static import static_view\nfrom pyramid.httpexceptions import HTTPTemporaryRedirect\n\n\ndef includeme(config):\n # Process settings to remove storage wording.\n\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n version=\"1.6.0\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n\n # Setup redirect without trailing slash.\n def admin_redirect_view(request):\n raise HTTPTemporaryRedirect(request.path + '/')\n\n config.add_route('admin_redirect', '/admin')\n config.add_view(admin_redirect_view, route_name=\"admin_redirect\")\n", "path": "kinto/plugins/admin/__init__.py"}]}
| 678 | 198 |
gh_patches_debug_7780
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-1627
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nullability for `pl.Float64` in `pl.DataFrame` fails
**Describe the bug**
The nullability constraint is not enforced in `pl.DataFrame` when the underlying data type is `pl.Float64`. When I provide a `pl.DataFrame` with a float column that contains `None`, it passes.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
#### Code Sample, a copy-pastable example
```python
import polars as pl
import pandera.polars as pa
class Schema(pa.DataFrameModel):
col: float = pa.Field(nullable=False)
df = pl.DataFrame(data={"col": [1.0, 2.5, None]}, schema={"col": pl.Float64})
Schema.validate(df)
```
#### Expected behavior
The validation should fail as the column `col` contains a `None` value, but the schema requires it to be non-nullable.
#### Desktop (please complete the following information):
- OS: MacOS
- Browser: Chrome
#### Additional context
The nullability constraint is correctly enforced if the underlying data type is `int` or `str`.
For `float`, the nullability constraint correctly detects NULL values in the form of `np.nan`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/backends/polars/components.py`
Content:
```
1 """Validation backend for polars components."""
2
3 import warnings
4 from typing import Any, Callable, Iterable, List, Optional, cast
5
6 import polars as pl
7
8 from pandera.api.base.error_handler import ErrorHandler
9 from pandera.api.polars.components import Column
10 from pandera.api.polars.types import PolarsData
11 from pandera.backends.base import CoreCheckResult
12 from pandera.backends.polars.base import PolarsSchemaBackend, is_float_dtype
13 from pandera.config import ValidationDepth, ValidationScope, get_config_context
14 from pandera.constants import CHECK_OUTPUT_KEY
15 from pandera.errors import (
16 ParserError,
17 SchemaDefinitionError,
18 SchemaError,
19 SchemaErrorReason,
20 SchemaErrors,
21 )
22 from pandera.validation_depth import validate_scope, validation_type
23
24
25 class ColumnBackend(PolarsSchemaBackend):
26 """Column backend for polars LazyFrames."""
27
28 def preprocess(self, check_obj, inplace: bool = False):
29 """Returns a copy of the object if inplace is False."""
30 # NOTE: is this even necessary?
31 return check_obj if inplace else check_obj.clone()
32
33 # pylint: disable=too-many-locals
34 def validate(
35 self,
36 check_obj: pl.LazyFrame,
37 schema: Column,
38 *,
39 head: Optional[int] = None,
40 tail: Optional[int] = None,
41 sample: Optional[int] = None,
42 random_state: Optional[int] = None,
43 lazy: bool = False,
44 inplace: bool = False,
45 ) -> pl.LazyFrame:
46
47 if inplace:
48 warnings.warn("setting inplace=True will have no effect.")
49
50 if schema.name is None:
51 raise SchemaDefinitionError(
52 "Column schema must have a name specified."
53 )
54
55 error_handler = ErrorHandler(lazy)
56 check_obj = self.preprocess(check_obj, inplace)
57
58 if getattr(schema, "drop_invalid_rows", False) and not lazy:
59 raise SchemaDefinitionError(
60 "When drop_invalid_rows is True, lazy must be set to True."
61 )
62
63 core_parsers: List[Callable[..., Any]] = [
64 self.coerce_dtype,
65 self.set_default,
66 ]
67
68 for parser in core_parsers:
69 try:
70 check_obj = parser(check_obj, schema)
71 except SchemaError as exc:
72 error_handler.collect_error(
73 validation_type(exc.reason_code),
74 exc.reason_code,
75 exc,
76 )
77 except SchemaErrors as exc:
78 error_handler.collect_errors(exc.schema_errors)
79
80 error_handler = self.run_checks_and_handle_errors(
81 error_handler,
82 schema,
83 check_obj,
84 head=head,
85 tail=tail,
86 sample=sample,
87 random_state=random_state,
88 )
89
90 if lazy and error_handler.collected_errors:
91 if getattr(schema, "drop_invalid_rows", False):
92 check_obj = self.drop_invalid_rows(check_obj, error_handler)
93 else:
94 raise SchemaErrors(
95 schema=schema,
96 schema_errors=error_handler.schema_errors,
97 data=check_obj,
98 )
99
100 return check_obj
101
102 def get_regex_columns(self, schema, check_obj) -> Iterable:
103 return check_obj.select(pl.col(schema.selector)).columns
104
105 def run_checks_and_handle_errors(
106 self,
107 error_handler: ErrorHandler,
108 schema,
109 check_obj: pl.LazyFrame,
110 **subsample_kwargs,
111 ):
112 """Run checks on schema"""
113 # pylint: disable=too-many-locals
114 check_obj_subsample = self.subsample(check_obj, **subsample_kwargs)
115
116 core_checks = [
117 self.check_nullable,
118 self.check_unique,
119 self.check_dtype,
120 self.run_checks,
121 ]
122 args = (check_obj_subsample, schema)
123 for core_check in core_checks:
124 results = core_check(*args)
125 if isinstance(results, CoreCheckResult):
126 results = [results]
127 results = cast(List[CoreCheckResult], results)
128 for result in results:
129 if result.passed:
130 continue
131
132 if result.schema_error is not None:
133 error = result.schema_error
134 else:
135 assert result.reason_code is not None
136 error = SchemaError(
137 schema=schema,
138 data=check_obj,
139 message=result.message,
140 failure_cases=result.failure_cases,
141 check=result.check,
142 check_index=result.check_index,
143 check_output=result.check_output,
144 reason_code=result.reason_code,
145 )
146 error_handler.collect_error(
147 validation_type(result.reason_code),
148 result.reason_code,
149 error,
150 original_exc=result.original_exc,
151 )
152
153 return error_handler
154
155 def coerce_dtype(
156 self,
157 check_obj: pl.LazyFrame,
158 schema=None,
159 # pylint: disable=unused-argument
160 ) -> pl.LazyFrame:
161 """Coerce type of a pd.Series by type specified in dtype.
162
163 :param check_obj: LazyFrame to coerce
164 :returns: coerced LazyFrame
165 """
166 assert schema is not None, "The `schema` argument must be provided."
167 if schema.dtype is None or not schema.coerce:
168 return check_obj
169
170 config_ctx = get_config_context(validation_depth_default=None)
171 coerce_fn: Callable[[pl.LazyFrame], pl.LazyFrame] = (
172 schema.dtype.coerce
173 if config_ctx.validation_depth == ValidationDepth.SCHEMA_ONLY
174 else schema.dtype.try_coerce
175 )
176
177 try:
178 return coerce_fn(check_obj)
179 except ParserError as exc:
180 raise SchemaError(
181 schema=schema,
182 data=check_obj,
183 message=(
184 f"Error while coercing '{schema.selector}' to type "
185 f"{schema.dtype}: {exc}"
186 ),
187 check=f"coerce_dtype('{schema.dtype}')",
188 reason_code=SchemaErrorReason.DATATYPE_COERCION,
189 ) from exc
190
191 @validate_scope(scope=ValidationScope.DATA)
192 def check_nullable(
193 self,
194 check_obj: pl.LazyFrame,
195 schema,
196 ) -> List[CoreCheckResult]:
197 """Check if a column is nullable.
198
199 This check considers nulls and nan values as effectively equivalent.
200 """
201 if schema.nullable:
202 return [
203 CoreCheckResult(
204 passed=True,
205 check="not_nullable",
206 reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,
207 )
208 ]
209
210 if is_float_dtype(check_obj, schema.selector):
211 expr = pl.col(schema.selector).is_not_nan()
212 else:
213 expr = pl.col(schema.selector).is_not_null()
214
215 isna = check_obj.select(expr)
216 passed = isna.select([pl.col("*").all()]).collect()
217 results = []
218 for column in isna.columns:
219 if passed.select(column).item():
220 continue
221 failure_cases = (
222 check_obj.with_context(
223 isna.select(pl.col(column).alias(CHECK_OUTPUT_KEY))
224 )
225 .filter(pl.col(CHECK_OUTPUT_KEY).not_())
226 .select(column)
227 .collect()
228 )
229 results.append(
230 CoreCheckResult(
231 passed=cast(bool, passed.select(column).item()),
232 check_output=isna.collect().rename(
233 {column: CHECK_OUTPUT_KEY}
234 ),
235 check="not_nullable",
236 reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,
237 message=(
238 f"non-nullable column '{schema.selector}' contains "
239 f"null values"
240 ),
241 failure_cases=failure_cases,
242 )
243 )
244 return results
245
246 @validate_scope(scope=ValidationScope.DATA)
247 def check_unique(
248 self,
249 check_obj: pl.LazyFrame,
250 schema,
251 ) -> List[CoreCheckResult]:
252 check_name = "field_uniqueness"
253 if not schema.unique:
254 return [
255 CoreCheckResult(
256 passed=True,
257 check=check_name,
258 reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,
259 )
260 ]
261
262 results = []
263 duplicates = (
264 check_obj.select(schema.selector)
265 .collect()
266 .select(pl.col("*").is_duplicated())
267 )
268 for column in duplicates.columns:
269 if duplicates.select(pl.col(column).any()).item():
270 failure_cases = (
271 check_obj.with_context(
272 duplicates.select(
273 pl.col(column).alias("_duplicated")
274 ).lazy()
275 )
276 .filter(pl.col("_duplicated"))
277 .select(column)
278 .collect()
279 )
280 results.append(
281 CoreCheckResult(
282 passed=False,
283 check=check_name,
284 check_output=duplicates.select(
285 pl.col(column).not_().alias(CHECK_OUTPUT_KEY)
286 ),
287 reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,
288 message=(
289 f"column '{schema.selector}' "
290 f"not unique:\n{failure_cases}"
291 ),
292 failure_cases=failure_cases,
293 )
294 )
295
296 return results
297
298 @validate_scope(scope=ValidationScope.SCHEMA)
299 def check_dtype(
300 self,
301 check_obj: pl.LazyFrame,
302 schema: Column,
303 ) -> List[CoreCheckResult]:
304
305 passed = True
306 failure_cases = None
307 msg = None
308
309 if schema.dtype is None:
310 return [
311 CoreCheckResult(
312 passed=passed,
313 check=f"dtype('{schema.dtype}')",
314 reason_code=SchemaErrorReason.WRONG_DATATYPE,
315 message=msg,
316 failure_cases=failure_cases,
317 )
318 ]
319
320 results = []
321 check_obj_subset = check_obj.select(schema.selector)
322 for column in check_obj_subset.columns:
323 obj_dtype = check_obj_subset.schema[column]
324 results.append(
325 CoreCheckResult(
326 passed=schema.dtype.check(
327 obj_dtype,
328 PolarsData(check_obj_subset, schema.selector),
329 ),
330 check=f"dtype('{schema.dtype}')",
331 reason_code=SchemaErrorReason.WRONG_DATATYPE,
332 message=(
333 f"expected column '{column}' to have type "
334 f"{schema.dtype}, got {obj_dtype}"
335 ),
336 failure_cases=str(obj_dtype),
337 )
338 )
339 return results
340
341 # pylint: disable=unused-argument
342 @validate_scope(scope=ValidationScope.DATA)
343 def run_checks(self, check_obj, schema) -> List[CoreCheckResult]:
344 check_results: List[CoreCheckResult] = []
345 for check_index, check in enumerate(schema.checks):
346 try:
347 check_results.append(
348 self.run_check(
349 check_obj,
350 schema,
351 check,
352 check_index,
353 schema.selector,
354 )
355 )
356 except Exception as err: # pylint: disable=broad-except
357 # catch other exceptions that may occur when executing the Check
358 err_msg = f'"{err.args[0]}"' if len(err.args) > 0 else ""
359 msg = f"{err.__class__.__name__}({err_msg})"
360 check_results.append(
361 CoreCheckResult(
362 passed=False,
363 check=check,
364 check_index=check_index,
365 reason_code=SchemaErrorReason.CHECK_ERROR,
366 message=msg,
367 failure_cases=msg,
368 original_exc=err,
369 )
370 )
371 return check_results
372
373 def set_default(self, check_obj: pl.LazyFrame, schema) -> pl.LazyFrame:
374 """Set default values for columns with missing values."""
375 if hasattr(schema, "default") and schema.default is None:
376 return check_obj
377
378 default_value = pl.lit(schema.default, dtype=schema.dtype.type)
379 expr = pl.col(schema.selector)
380 if is_float_dtype(check_obj, schema.selector):
381 expr = expr.fill_nan(default_value)
382 else:
383 expr = expr.fill_null(default_value)
384
385 return check_obj.with_columns(expr)
386
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandera/backends/polars/components.py b/pandera/backends/polars/components.py
--- a/pandera/backends/polars/components.py
+++ b/pandera/backends/polars/components.py
@@ -207,10 +207,9 @@
)
]
+ expr = pl.col(schema.selector).is_not_null()
if is_float_dtype(check_obj, schema.selector):
- expr = pl.col(schema.selector).is_not_nan()
- else:
- expr = pl.col(schema.selector).is_not_null()
+ expr = expr & pl.col(schema.selector).is_not_nan()
isna = check_obj.select(expr)
passed = isna.select([pl.col("*").all()]).collect()
|
{"golden_diff": "diff --git a/pandera/backends/polars/components.py b/pandera/backends/polars/components.py\n--- a/pandera/backends/polars/components.py\n+++ b/pandera/backends/polars/components.py\n@@ -207,10 +207,9 @@\n )\n ]\n \n+ expr = pl.col(schema.selector).is_not_null()\n if is_float_dtype(check_obj, schema.selector):\n- expr = pl.col(schema.selector).is_not_nan()\n- else:\n- expr = pl.col(schema.selector).is_not_null()\n+ expr = expr & pl.col(schema.selector).is_not_nan()\n \n isna = check_obj.select(expr)\n passed = isna.select([pl.col(\"*\").all()]).collect()\n", "issue": "Nullability for `pl.Float64` in `pl.DataFrame` fails\n**Describe the bug**\r\nThe nullability constraint is not enforced in `pl.DataFrame` when the underlying data type is `pl.Float64`. When I provide a `pl.DataFrame` with a float column that contains `None`, it passes.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\n```python\r\nimport polars as pl\r\nimport pandera.polars as pa\r\n\r\n\r\nclass Schema(pa.DataFrameModel):\r\n col: float = pa.Field(nullable=False)\r\n\r\n\r\ndf = pl.DataFrame(data={\"col\": [1.0, 2.5, None]}, schema={\"col\": pl.Float64})\r\n\r\nSchema.validate(df)\r\n\r\n\r\n```\r\n\r\n#### Expected behavior\r\nThe validation should vail as the column `col` contains a `None` value, but the schema requires it to be non-nullable.\r\n\r\n#### Desktop (please complete the following information):\r\n\r\n - OS: MacOS\r\n - Browser: Chrome\r\n\r\n#### Additional context\r\nThe nullability constraint is correctly enforced if the underlying data type is `int` or `str`.\r\nFor `float`, the nullability constraint correctly detects NULL values in the form of `np.nan`.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Validation backend for polars components.\"\"\"\n\nimport warnings\nfrom typing import Any, Callable, Iterable, List, Optional, cast\n\nimport polars as pl\n\nfrom pandera.api.base.error_handler import ErrorHandler\nfrom pandera.api.polars.components import Column\nfrom pandera.api.polars.types import PolarsData\nfrom pandera.backends.base import CoreCheckResult\nfrom pandera.backends.polars.base import PolarsSchemaBackend, is_float_dtype\nfrom pandera.config import ValidationDepth, ValidationScope, get_config_context\nfrom pandera.constants import CHECK_OUTPUT_KEY\nfrom pandera.errors import (\n ParserError,\n SchemaDefinitionError,\n SchemaError,\n SchemaErrorReason,\n SchemaErrors,\n)\nfrom pandera.validation_depth import validate_scope, validation_type\n\n\nclass ColumnBackend(PolarsSchemaBackend):\n \"\"\"Column backend for polars LazyFrames.\"\"\"\n\n def preprocess(self, check_obj, inplace: bool = False):\n \"\"\"Returns a copy of the object if inplace is False.\"\"\"\n # NOTE: is this even necessary?\n return check_obj if inplace else check_obj.clone()\n\n # pylint: disable=too-many-locals\n def validate(\n self,\n check_obj: pl.LazyFrame,\n schema: Column,\n *,\n head: Optional[int] = None,\n tail: Optional[int] = None,\n sample: Optional[int] = None,\n random_state: Optional[int] = None,\n lazy: bool = False,\n inplace: bool = False,\n ) -> pl.LazyFrame:\n\n if inplace:\n warnings.warn(\"setting inplace=True will have no effect.\")\n\n if schema.name is None:\n raise SchemaDefinitionError(\n \"Column schema must have a name specified.\"\n )\n\n error_handler = ErrorHandler(lazy)\n check_obj = self.preprocess(check_obj, 
inplace)\n\n if getattr(schema, \"drop_invalid_rows\", False) and not lazy:\n raise SchemaDefinitionError(\n \"When drop_invalid_rows is True, lazy must be set to True.\"\n )\n\n core_parsers: List[Callable[..., Any]] = [\n self.coerce_dtype,\n self.set_default,\n ]\n\n for parser in core_parsers:\n try:\n check_obj = parser(check_obj, schema)\n except SchemaError as exc:\n error_handler.collect_error(\n validation_type(exc.reason_code),\n exc.reason_code,\n exc,\n )\n except SchemaErrors as exc:\n error_handler.collect_errors(exc.schema_errors)\n\n error_handler = self.run_checks_and_handle_errors(\n error_handler,\n schema,\n check_obj,\n head=head,\n tail=tail,\n sample=sample,\n random_state=random_state,\n )\n\n if lazy and error_handler.collected_errors:\n if getattr(schema, \"drop_invalid_rows\", False):\n check_obj = self.drop_invalid_rows(check_obj, error_handler)\n else:\n raise SchemaErrors(\n schema=schema,\n schema_errors=error_handler.schema_errors,\n data=check_obj,\n )\n\n return check_obj\n\n def get_regex_columns(self, schema, check_obj) -> Iterable:\n return check_obj.select(pl.col(schema.selector)).columns\n\n def run_checks_and_handle_errors(\n self,\n error_handler: ErrorHandler,\n schema,\n check_obj: pl.LazyFrame,\n **subsample_kwargs,\n ):\n \"\"\"Run checks on schema\"\"\"\n # pylint: disable=too-many-locals\n check_obj_subsample = self.subsample(check_obj, **subsample_kwargs)\n\n core_checks = [\n self.check_nullable,\n self.check_unique,\n self.check_dtype,\n self.run_checks,\n ]\n args = (check_obj_subsample, schema)\n for core_check in core_checks:\n results = core_check(*args)\n if isinstance(results, CoreCheckResult):\n results = [results]\n results = cast(List[CoreCheckResult], results)\n for result in results:\n if result.passed:\n continue\n\n if result.schema_error is not None:\n error = result.schema_error\n else:\n assert result.reason_code is not None\n error = SchemaError(\n schema=schema,\n data=check_obj,\n message=result.message,\n failure_cases=result.failure_cases,\n check=result.check,\n check_index=result.check_index,\n check_output=result.check_output,\n reason_code=result.reason_code,\n )\n error_handler.collect_error(\n validation_type(result.reason_code),\n result.reason_code,\n error,\n original_exc=result.original_exc,\n )\n\n return error_handler\n\n def coerce_dtype(\n self,\n check_obj: pl.LazyFrame,\n schema=None,\n # pylint: disable=unused-argument\n ) -> pl.LazyFrame:\n \"\"\"Coerce type of a pd.Series by type specified in dtype.\n\n :param check_obj: LazyFrame to coerce\n :returns: coerced LazyFrame\n \"\"\"\n assert schema is not None, \"The `schema` argument must be provided.\"\n if schema.dtype is None or not schema.coerce:\n return check_obj\n\n config_ctx = get_config_context(validation_depth_default=None)\n coerce_fn: Callable[[pl.LazyFrame], pl.LazyFrame] = (\n schema.dtype.coerce\n if config_ctx.validation_depth == ValidationDepth.SCHEMA_ONLY\n else schema.dtype.try_coerce\n )\n\n try:\n return coerce_fn(check_obj)\n except ParserError as exc:\n raise SchemaError(\n schema=schema,\n data=check_obj,\n message=(\n f\"Error while coercing '{schema.selector}' to type \"\n f\"{schema.dtype}: {exc}\"\n ),\n check=f\"coerce_dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.DATATYPE_COERCION,\n ) from exc\n\n @validate_scope(scope=ValidationScope.DATA)\n def check_nullable(\n self,\n check_obj: pl.LazyFrame,\n schema,\n ) -> List[CoreCheckResult]:\n \"\"\"Check if a column is nullable.\n\n This check considers nulls and 
nan values as effectively equivalent.\n \"\"\"\n if schema.nullable:\n return [\n CoreCheckResult(\n passed=True,\n check=\"not_nullable\",\n reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,\n )\n ]\n\n if is_float_dtype(check_obj, schema.selector):\n expr = pl.col(schema.selector).is_not_nan()\n else:\n expr = pl.col(schema.selector).is_not_null()\n\n isna = check_obj.select(expr)\n passed = isna.select([pl.col(\"*\").all()]).collect()\n results = []\n for column in isna.columns:\n if passed.select(column).item():\n continue\n failure_cases = (\n check_obj.with_context(\n isna.select(pl.col(column).alias(CHECK_OUTPUT_KEY))\n )\n .filter(pl.col(CHECK_OUTPUT_KEY).not_())\n .select(column)\n .collect()\n )\n results.append(\n CoreCheckResult(\n passed=cast(bool, passed.select(column).item()),\n check_output=isna.collect().rename(\n {column: CHECK_OUTPUT_KEY}\n ),\n check=\"not_nullable\",\n reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,\n message=(\n f\"non-nullable column '{schema.selector}' contains \"\n f\"null values\"\n ),\n failure_cases=failure_cases,\n )\n )\n return results\n\n @validate_scope(scope=ValidationScope.DATA)\n def check_unique(\n self,\n check_obj: pl.LazyFrame,\n schema,\n ) -> List[CoreCheckResult]:\n check_name = \"field_uniqueness\"\n if not schema.unique:\n return [\n CoreCheckResult(\n passed=True,\n check=check_name,\n reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,\n )\n ]\n\n results = []\n duplicates = (\n check_obj.select(schema.selector)\n .collect()\n .select(pl.col(\"*\").is_duplicated())\n )\n for column in duplicates.columns:\n if duplicates.select(pl.col(column).any()).item():\n failure_cases = (\n check_obj.with_context(\n duplicates.select(\n pl.col(column).alias(\"_duplicated\")\n ).lazy()\n )\n .filter(pl.col(\"_duplicated\"))\n .select(column)\n .collect()\n )\n results.append(\n CoreCheckResult(\n passed=False,\n check=check_name,\n check_output=duplicates.select(\n pl.col(column).not_().alias(CHECK_OUTPUT_KEY)\n ),\n reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,\n message=(\n f\"column '{schema.selector}' \"\n f\"not unique:\\n{failure_cases}\"\n ),\n failure_cases=failure_cases,\n )\n )\n\n return results\n\n @validate_scope(scope=ValidationScope.SCHEMA)\n def check_dtype(\n self,\n check_obj: pl.LazyFrame,\n schema: Column,\n ) -> List[CoreCheckResult]:\n\n passed = True\n failure_cases = None\n msg = None\n\n if schema.dtype is None:\n return [\n CoreCheckResult(\n passed=passed,\n check=f\"dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.WRONG_DATATYPE,\n message=msg,\n failure_cases=failure_cases,\n )\n ]\n\n results = []\n check_obj_subset = check_obj.select(schema.selector)\n for column in check_obj_subset.columns:\n obj_dtype = check_obj_subset.schema[column]\n results.append(\n CoreCheckResult(\n passed=schema.dtype.check(\n obj_dtype,\n PolarsData(check_obj_subset, schema.selector),\n ),\n check=f\"dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.WRONG_DATATYPE,\n message=(\n f\"expected column '{column}' to have type \"\n f\"{schema.dtype}, got {obj_dtype}\"\n ),\n failure_cases=str(obj_dtype),\n )\n )\n return results\n\n # pylint: disable=unused-argument\n @validate_scope(scope=ValidationScope.DATA)\n def run_checks(self, check_obj, schema) -> List[CoreCheckResult]:\n check_results: List[CoreCheckResult] = []\n for check_index, check in enumerate(schema.checks):\n try:\n check_results.append(\n self.run_check(\n check_obj,\n schema,\n check,\n check_index,\n schema.selector,\n )\n 
)\n except Exception as err: # pylint: disable=broad-except\n # catch other exceptions that may occur when executing the Check\n err_msg = f'\"{err.args[0]}\"' if len(err.args) > 0 else \"\"\n msg = f\"{err.__class__.__name__}({err_msg})\"\n check_results.append(\n CoreCheckResult(\n passed=False,\n check=check,\n check_index=check_index,\n reason_code=SchemaErrorReason.CHECK_ERROR,\n message=msg,\n failure_cases=msg,\n original_exc=err,\n )\n )\n return check_results\n\n def set_default(self, check_obj: pl.LazyFrame, schema) -> pl.LazyFrame:\n \"\"\"Set default values for columns with missing values.\"\"\"\n if hasattr(schema, \"default\") and schema.default is None:\n return check_obj\n\n default_value = pl.lit(schema.default, dtype=schema.dtype.type)\n expr = pl.col(schema.selector)\n if is_float_dtype(check_obj, schema.selector):\n expr = expr.fill_nan(default_value)\n else:\n expr = expr.fill_null(default_value)\n\n return check_obj.with_columns(expr)\n", "path": "pandera/backends/polars/components.py"}], "after_files": [{"content": "\"\"\"Validation backend for polars components.\"\"\"\n\nimport warnings\nfrom typing import Any, Callable, Iterable, List, Optional, cast\n\nimport polars as pl\n\nfrom pandera.api.base.error_handler import ErrorHandler\nfrom pandera.api.polars.components import Column\nfrom pandera.api.polars.types import PolarsData\nfrom pandera.backends.base import CoreCheckResult\nfrom pandera.backends.polars.base import PolarsSchemaBackend, is_float_dtype\nfrom pandera.config import ValidationDepth, ValidationScope, get_config_context\nfrom pandera.constants import CHECK_OUTPUT_KEY\nfrom pandera.errors import (\n ParserError,\n SchemaDefinitionError,\n SchemaError,\n SchemaErrorReason,\n SchemaErrors,\n)\nfrom pandera.validation_depth import validate_scope, validation_type\n\n\nclass ColumnBackend(PolarsSchemaBackend):\n \"\"\"Column backend for polars LazyFrames.\"\"\"\n\n def preprocess(self, check_obj, inplace: bool = False):\n \"\"\"Returns a copy of the object if inplace is False.\"\"\"\n # NOTE: is this even necessary?\n return check_obj if inplace else check_obj.clone()\n\n # pylint: disable=too-many-locals\n def validate(\n self,\n check_obj: pl.LazyFrame,\n schema: Column,\n *,\n head: Optional[int] = None,\n tail: Optional[int] = None,\n sample: Optional[int] = None,\n random_state: Optional[int] = None,\n lazy: bool = False,\n inplace: bool = False,\n ) -> pl.LazyFrame:\n\n if inplace:\n warnings.warn(\"setting inplace=True will have no effect.\")\n\n if schema.name is None:\n raise SchemaDefinitionError(\n \"Column schema must have a name specified.\"\n )\n\n error_handler = ErrorHandler(lazy)\n check_obj = self.preprocess(check_obj, inplace)\n\n if getattr(schema, \"drop_invalid_rows\", False) and not lazy:\n raise SchemaDefinitionError(\n \"When drop_invalid_rows is True, lazy must be set to True.\"\n )\n\n core_parsers: List[Callable[..., Any]] = [\n self.coerce_dtype,\n self.set_default,\n ]\n\n for parser in core_parsers:\n try:\n check_obj = parser(check_obj, schema)\n except SchemaError as exc:\n error_handler.collect_error(\n validation_type(exc.reason_code),\n exc.reason_code,\n exc,\n )\n except SchemaErrors as exc:\n error_handler.collect_errors(exc.schema_errors)\n\n error_handler = self.run_checks_and_handle_errors(\n error_handler,\n schema,\n check_obj,\n head=head,\n tail=tail,\n sample=sample,\n random_state=random_state,\n )\n\n if lazy and error_handler.collected_errors:\n if getattr(schema, \"drop_invalid_rows\", False):\n check_obj = 
self.drop_invalid_rows(check_obj, error_handler)\n else:\n raise SchemaErrors(\n schema=schema,\n schema_errors=error_handler.schema_errors,\n data=check_obj,\n )\n\n return check_obj\n\n def get_regex_columns(self, schema, check_obj) -> Iterable:\n return check_obj.select(pl.col(schema.selector)).columns\n\n def run_checks_and_handle_errors(\n self,\n error_handler: ErrorHandler,\n schema,\n check_obj: pl.LazyFrame,\n **subsample_kwargs,\n ):\n \"\"\"Run checks on schema\"\"\"\n # pylint: disable=too-many-locals\n check_obj_subsample = self.subsample(check_obj, **subsample_kwargs)\n\n core_checks = [\n self.check_nullable,\n self.check_unique,\n self.check_dtype,\n self.run_checks,\n ]\n args = (check_obj_subsample, schema)\n for core_check in core_checks:\n results = core_check(*args)\n if isinstance(results, CoreCheckResult):\n results = [results]\n results = cast(List[CoreCheckResult], results)\n for result in results:\n if result.passed:\n continue\n\n if result.schema_error is not None:\n error = result.schema_error\n else:\n assert result.reason_code is not None\n error = SchemaError(\n schema=schema,\n data=check_obj,\n message=result.message,\n failure_cases=result.failure_cases,\n check=result.check,\n check_index=result.check_index,\n check_output=result.check_output,\n reason_code=result.reason_code,\n )\n error_handler.collect_error(\n validation_type(result.reason_code),\n result.reason_code,\n error,\n original_exc=result.original_exc,\n )\n\n return error_handler\n\n def coerce_dtype(\n self,\n check_obj: pl.LazyFrame,\n schema=None,\n # pylint: disable=unused-argument\n ) -> pl.LazyFrame:\n \"\"\"Coerce type of a pd.Series by type specified in dtype.\n\n :param check_obj: LazyFrame to coerce\n :returns: coerced LazyFrame\n \"\"\"\n assert schema is not None, \"The `schema` argument must be provided.\"\n if schema.dtype is None or not schema.coerce:\n return check_obj\n\n config_ctx = get_config_context(validation_depth_default=None)\n coerce_fn: Callable[[pl.LazyFrame], pl.LazyFrame] = (\n schema.dtype.coerce\n if config_ctx.validation_depth == ValidationDepth.SCHEMA_ONLY\n else schema.dtype.try_coerce\n )\n\n try:\n return coerce_fn(check_obj)\n except ParserError as exc:\n raise SchemaError(\n schema=schema,\n data=check_obj,\n message=(\n f\"Error while coercing '{schema.selector}' to type \"\n f\"{schema.dtype}: {exc}\"\n ),\n check=f\"coerce_dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.DATATYPE_COERCION,\n ) from exc\n\n @validate_scope(scope=ValidationScope.DATA)\n def check_nullable(\n self,\n check_obj: pl.LazyFrame,\n schema,\n ) -> List[CoreCheckResult]:\n \"\"\"Check if a column is nullable.\n\n This check considers nulls and nan values as effectively equivalent.\n \"\"\"\n if schema.nullable:\n return [\n CoreCheckResult(\n passed=True,\n check=\"not_nullable\",\n reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,\n )\n ]\n\n expr = pl.col(schema.selector).is_not_null()\n if is_float_dtype(check_obj, schema.selector):\n expr = expr & pl.col(schema.selector).is_not_nan()\n\n isna = check_obj.select(expr)\n passed = isna.select([pl.col(\"*\").all()]).collect()\n results = []\n for column in isna.columns:\n if passed.select(column).item():\n continue\n failure_cases = (\n check_obj.with_context(\n isna.select(pl.col(column).alias(CHECK_OUTPUT_KEY))\n )\n .filter(pl.col(CHECK_OUTPUT_KEY).not_())\n .select(column)\n .collect()\n )\n results.append(\n CoreCheckResult(\n passed=cast(bool, passed.select(column).item()),\n 
check_output=isna.collect().rename(\n {column: CHECK_OUTPUT_KEY}\n ),\n check=\"not_nullable\",\n reason_code=SchemaErrorReason.SERIES_CONTAINS_NULLS,\n message=(\n f\"non-nullable column '{schema.selector}' contains \"\n f\"null values\"\n ),\n failure_cases=failure_cases,\n )\n )\n return results\n\n @validate_scope(scope=ValidationScope.DATA)\n def check_unique(\n self,\n check_obj: pl.LazyFrame,\n schema,\n ) -> List[CoreCheckResult]:\n check_name = \"field_uniqueness\"\n if not schema.unique:\n return [\n CoreCheckResult(\n passed=True,\n check=check_name,\n reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,\n )\n ]\n\n results = []\n duplicates = (\n check_obj.select(schema.selector)\n .collect()\n .select(pl.col(\"*\").is_duplicated())\n )\n for column in duplicates.columns:\n if duplicates.select(pl.col(column).any()).item():\n failure_cases = (\n check_obj.with_context(\n duplicates.select(\n pl.col(column).alias(\"_duplicated\")\n ).lazy()\n )\n .filter(pl.col(\"_duplicated\"))\n .select(column)\n .collect()\n )\n results.append(\n CoreCheckResult(\n passed=False,\n check=check_name,\n check_output=duplicates.select(\n pl.col(column).not_().alias(CHECK_OUTPUT_KEY)\n ),\n reason_code=SchemaErrorReason.SERIES_CONTAINS_DUPLICATES,\n message=(\n f\"column '{schema.selector}' \"\n f\"not unique:\\n{failure_cases}\"\n ),\n failure_cases=failure_cases,\n )\n )\n\n return results\n\n @validate_scope(scope=ValidationScope.SCHEMA)\n def check_dtype(\n self,\n check_obj: pl.LazyFrame,\n schema: Column,\n ) -> List[CoreCheckResult]:\n\n passed = True\n failure_cases = None\n msg = None\n\n if schema.dtype is None:\n return [\n CoreCheckResult(\n passed=passed,\n check=f\"dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.WRONG_DATATYPE,\n message=msg,\n failure_cases=failure_cases,\n )\n ]\n\n results = []\n check_obj_subset = check_obj.select(schema.selector)\n for column in check_obj_subset.columns:\n obj_dtype = check_obj_subset.schema[column]\n results.append(\n CoreCheckResult(\n passed=schema.dtype.check(\n obj_dtype,\n PolarsData(check_obj_subset, schema.selector),\n ),\n check=f\"dtype('{schema.dtype}')\",\n reason_code=SchemaErrorReason.WRONG_DATATYPE,\n message=(\n f\"expected column '{column}' to have type \"\n f\"{schema.dtype}, got {obj_dtype}\"\n ),\n failure_cases=str(obj_dtype),\n )\n )\n return results\n\n # pylint: disable=unused-argument\n @validate_scope(scope=ValidationScope.DATA)\n def run_checks(self, check_obj, schema) -> List[CoreCheckResult]:\n check_results: List[CoreCheckResult] = []\n for check_index, check in enumerate(schema.checks):\n try:\n check_results.append(\n self.run_check(\n check_obj,\n schema,\n check,\n check_index,\n schema.selector,\n )\n )\n except Exception as err: # pylint: disable=broad-except\n # catch other exceptions that may occur when executing the Check\n err_msg = f'\"{err.args[0]}\"' if len(err.args) > 0 else \"\"\n msg = f\"{err.__class__.__name__}({err_msg})\"\n check_results.append(\n CoreCheckResult(\n passed=False,\n check=check,\n check_index=check_index,\n reason_code=SchemaErrorReason.CHECK_ERROR,\n message=msg,\n failure_cases=msg,\n original_exc=err,\n )\n )\n return check_results\n\n def set_default(self, check_obj: pl.LazyFrame, schema) -> pl.LazyFrame:\n \"\"\"Set default values for columns with missing values.\"\"\"\n if hasattr(schema, \"default\") and schema.default is None:\n return check_obj\n\n default_value = pl.lit(schema.default, dtype=schema.dtype.type)\n expr = pl.col(schema.selector)\n if 
is_float_dtype(check_obj, schema.selector):\n expr = expr.fill_nan(default_value)\n else:\n expr = expr.fill_null(default_value)\n\n return check_obj.with_columns(expr)\n", "path": "pandera/backends/polars/components.py"}]}
| 4,057 | 163 |
gh_patches_debug_61588
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document negative `sigma` values in `filters.gaussian_filter` are clipped to zero.
Negative sigma values have no effect; they are clipped to zero. This should be documented.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/filters/_gaussian.py`
Content:
```
1 import collections as coll
2 import numpy as np
3 from scipy import ndimage as ndi
4 import warnings
5
6 from ..util import img_as_float
7 from ..color import guess_spatial_dimensions
8
9 __all__ = ['gaussian_filter']
10
11
12 def gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,
13 multichannel=None):
14 """Multi-dimensional Gaussian filter
15
16 Parameters
17 ----------
18 image : array-like
19 input image (grayscale or color) to filter.
20 sigma : scalar or sequence of scalars
21 standard deviation for Gaussian kernel. The standard
22 deviations of the Gaussian filter are given for each axis as a
23 sequence, or as a single number, in which case it is equal for
24 all axes.
25 output : array, optional
26 The ``output`` parameter passes an array in which to store the
27 filter output.
28 mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
29 The `mode` parameter determines how the array borders are
30 handled, where `cval` is the value when mode is equal to
31 'constant'. Default is 'nearest'.
32 cval : scalar, optional
33 Value to fill past edges of input if `mode` is 'constant'. Default
34 is 0.0
35 multichannel : bool, optional (default: None)
36 Whether the last axis of the image is to be interpreted as multiple
37 channels. If True, each channel is filtered separately (channels are
38 not mixed together). Only 3 channels are supported. If `None`,
39 the function will attempt to guess this, and raise a warning if
40 ambiguous, when the array has shape (M, N, 3).
41
42 Returns
43 -------
44 filtered_image : ndarray
45 the filtered array
46
47 Notes
48 -----
49 This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.
50
51 Integer arrays are converted to float.
52
53 The multi-dimensional filter is implemented as a sequence of
54 one-dimensional convolution filters. The intermediate arrays are
55 stored in the same data type as the output. Therefore, for output
56 types with a limited precision, the results may be imprecise
57 because intermediate results may be stored with insufficient
58 precision.
59
60 Examples
61 --------
62
63 >>> a = np.zeros((3, 3))
64 >>> a[1, 1] = 1
65 >>> a
66 array([[ 0., 0., 0.],
67 [ 0., 1., 0.],
68 [ 0., 0., 0.]])
69 >>> gaussian_filter(a, sigma=0.4) # mild smoothing
70 array([[ 0.00163116, 0.03712502, 0.00163116],
71 [ 0.03712502, 0.84496158, 0.03712502],
72 [ 0.00163116, 0.03712502, 0.00163116]])
73 >>> gaussian_filter(a, sigma=1) # more smooting
74 array([[ 0.05855018, 0.09653293, 0.05855018],
75 [ 0.09653293, 0.15915589, 0.09653293],
76 [ 0.05855018, 0.09653293, 0.05855018]])
77 >>> # Several modes are possible for handling boundaries
78 >>> gaussian_filter(a, sigma=1, mode='reflect')
79 array([[ 0.08767308, 0.12075024, 0.08767308],
80 [ 0.12075024, 0.16630671, 0.12075024],
81 [ 0.08767308, 0.12075024, 0.08767308]])
82 >>> # For RGB images, each is filtered separately
83 >>> from skimage.data import astronaut
84 >>> image = astronaut()
85 >>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)
86
87 """
88
89 spatial_dims = guess_spatial_dimensions(image)
90 if spatial_dims is None and multichannel is None:
91 msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
92 "by default. Use `multichannel=False` to interpret as "
93 "3D image with last dimension of length 3.")
94 warnings.warn(RuntimeWarning(msg))
95 multichannel = True
96 if multichannel:
97 # do not filter across channels
98 if not isinstance(sigma, coll.Iterable):
99 sigma = [sigma] * (image.ndim - 1)
100 if len(sigma) != image.ndim:
101 sigma = np.concatenate((np.asarray(sigma), [0]))
102 image = img_as_float(image)
103 return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py
--- a/skimage/filters/_gaussian.py
+++ b/skimage/filters/_gaussian.py
@@ -93,6 +93,8 @@
"3D image with last dimension of length 3.")
warnings.warn(RuntimeWarning(msg))
multichannel = True
+ if np.any(np.asarray(sigma) < 0.0):
+ raise ValueError("Sigma values less than zero are not valid")
if multichannel:
# do not filter across channels
if not isinstance(sigma, coll.Iterable):
|
{"golden_diff": "diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py\n--- a/skimage/filters/_gaussian.py\n+++ b/skimage/filters/_gaussian.py\n@@ -93,6 +93,8 @@\n \"3D image with last dimension of length 3.\")\n warnings.warn(RuntimeWarning(msg))\n multichannel = True\n+ if np.any(np.asarray(sigma) < 0.0):\n+ raise ValueError(\"Sigma values less than zero are not valid\")\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n", "issue": "Document negative `sigma` values in `filters.gaussian_filter` are clipped to zero.\nNegative sigma values have no effect; they are clipped to zero. This should be documented.\n\n", "before_files": [{"content": "import collections as coll\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport warnings\n\nfrom ..util import img_as_float\nfrom ..color import guess_spatial_dimensions\n\n__all__ = ['gaussian_filter']\n\n\ndef gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,\n multichannel=None):\n \"\"\"Multi-dimensional Gaussian filter\n\n Parameters\n ----------\n image : array-like\n input image (grayscale or color) to filter.\n sigma : scalar or sequence of scalars\n standard deviation for Gaussian kernel. The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n output : array, optional\n The ``output`` parameter passes an array in which to store the\n filter output.\n mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the array borders are\n handled, where `cval` is the value when mode is equal to\n 'constant'. Default is 'nearest'.\n cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0\n multichannel : bool, optional (default: None)\n Whether the last axis of the image is to be interpreted as multiple\n channels. If True, each channel is filtered separately (channels are\n not mixed together). Only 3 channels are supported. If `None`,\n the function will attempt to guess this, and raise a warning if\n ambiguous, when the array has shape (M, N, 3).\n\n Returns\n -------\n filtered_image : ndarray\n the filtered array\n\n Notes\n -----\n This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.\n\n Integer arrays are converted to float.\n\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. 
Therefore, for output\n types with a limited precision, the results may be imprecise\n because intermediate results may be stored with insufficient\n precision.\n\n Examples\n --------\n\n >>> a = np.zeros((3, 3))\n >>> a[1, 1] = 1\n >>> a\n array([[ 0., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 0.]])\n >>> gaussian_filter(a, sigma=0.4) # mild smoothing\n array([[ 0.00163116, 0.03712502, 0.00163116],\n [ 0.03712502, 0.84496158, 0.03712502],\n [ 0.00163116, 0.03712502, 0.00163116]])\n >>> gaussian_filter(a, sigma=1) # more smooting\n array([[ 0.05855018, 0.09653293, 0.05855018],\n [ 0.09653293, 0.15915589, 0.09653293],\n [ 0.05855018, 0.09653293, 0.05855018]])\n >>> # Several modes are possible for handling boundaries\n >>> gaussian_filter(a, sigma=1, mode='reflect')\n array([[ 0.08767308, 0.12075024, 0.08767308],\n [ 0.12075024, 0.16630671, 0.12075024],\n [ 0.08767308, 0.12075024, 0.08767308]])\n >>> # For RGB images, each is filtered separately\n >>> from skimage.data import astronaut\n >>> image = astronaut()\n >>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)\n\n \"\"\"\n\n spatial_dims = guess_spatial_dimensions(image)\n if spatial_dims is None and multichannel is None:\n msg = (\"Images with dimensions (M, N, 3) are interpreted as 2D+RGB \"\n \"by default. Use `multichannel=False` to interpret as \"\n \"3D image with last dimension of length 3.\")\n warnings.warn(RuntimeWarning(msg))\n multichannel = True\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n sigma = [sigma] * (image.ndim - 1)\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n image = img_as_float(image)\n return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)\n", "path": "skimage/filters/_gaussian.py"}], "after_files": [{"content": "import collections as coll\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport warnings\n\nfrom ..util import img_as_float\nfrom ..color import guess_spatial_dimensions\n\n__all__ = ['gaussian_filter']\n\n\ndef gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,\n multichannel=None):\n \"\"\"Multi-dimensional Gaussian filter\n\n Parameters\n ----------\n image : array-like\n input image (grayscale or color) to filter.\n sigma : scalar or sequence of scalars\n standard deviation for Gaussian kernel. The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n output : array, optional\n The ``output`` parameter passes an array in which to store the\n filter output.\n mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the array borders are\n handled, where `cval` is the value when mode is equal to\n 'constant'. Default is 'nearest'.\n cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0\n multichannel : bool, optional (default: None)\n Whether the last axis of the image is to be interpreted as multiple\n channels. If True, each channel is filtered separately (channels are\n not mixed together). Only 3 channels are supported. 
If `None`,\n the function will attempt to guess this, and raise a warning if\n ambiguous, when the array has shape (M, N, 3).\n\n Returns\n -------\n filtered_image : ndarray\n the filtered array\n\n Notes\n -----\n This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.\n\n Integer arrays are converted to float.\n\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. Therefore, for output\n types with a limited precision, the results may be imprecise\n because intermediate results may be stored with insufficient\n precision.\n\n Examples\n --------\n\n >>> a = np.zeros((3, 3))\n >>> a[1, 1] = 1\n >>> a\n array([[ 0., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 0.]])\n >>> gaussian_filter(a, sigma=0.4) # mild smoothing\n array([[ 0.00163116, 0.03712502, 0.00163116],\n [ 0.03712502, 0.84496158, 0.03712502],\n [ 0.00163116, 0.03712502, 0.00163116]])\n >>> gaussian_filter(a, sigma=1) # more smooting\n array([[ 0.05855018, 0.09653293, 0.05855018],\n [ 0.09653293, 0.15915589, 0.09653293],\n [ 0.05855018, 0.09653293, 0.05855018]])\n >>> # Several modes are possible for handling boundaries\n >>> gaussian_filter(a, sigma=1, mode='reflect')\n array([[ 0.08767308, 0.12075024, 0.08767308],\n [ 0.12075024, 0.16630671, 0.12075024],\n [ 0.08767308, 0.12075024, 0.08767308]])\n >>> # For RGB images, each is filtered separately\n >>> from skimage.data import astronaut\n >>> image = astronaut()\n >>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)\n\n \"\"\"\n\n spatial_dims = guess_spatial_dimensions(image)\n if spatial_dims is None and multichannel is None:\n msg = (\"Images with dimensions (M, N, 3) are interpreted as 2D+RGB \"\n \"by default. Use `multichannel=False` to interpret as \"\n \"3D image with last dimension of length 3.\")\n warnings.warn(RuntimeWarning(msg))\n multichannel = True\n if np.any(np.asarray(sigma) < 0.0):\n raise ValueError(\"Sigma values less than zero are not valid\")\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n sigma = [sigma] * (image.ndim - 1)\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n image = img_as_float(image)\n return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)\n", "path": "skimage/filters/_gaussian.py"}]}
| 1,717 | 140 |
gh_patches_debug_27760
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-191
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal docs are published on the gh pages instead of the official API
We should remove everything that is not relevant to the end user!
Docs: https://microsoft.github.io/playwright-python/sync_api.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import playwright.helper as helper
16 from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager
17
18 Error = helper.Error
19 TimeoutError = helper.TimeoutError
20
21
22 def async_playwright() -> AsyncPlaywrightContextManager:
23 return AsyncPlaywrightContextManager()
24
25
26 def sync_playwright() -> SyncPlaywrightContextManager:
27 return SyncPlaywrightContextManager()
28
29
30 __all__ = [
31 "async_playwright",
32 "sync_playwright",
33 "Error",
34 "TimeoutError",
35 ]
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/playwright/__init__.py b/playwright/__init__.py
--- a/playwright/__init__.py
+++ b/playwright/__init__.py
@@ -11,6 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Python package `playwright` is a Python library to automate Chromium,
+Firefox and WebKit with a single API. Playwright is built to enable cross-browser
+web automation that is ever-green, capable, reliable and fast.
+For more information you'll find the documentation for the sync API [here](sync_api.html)
+and for the async API [here](async_api.html).
+"""
import playwright.helper as helper
from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager
@@ -33,3 +40,37 @@
"Error",
"TimeoutError",
]
+
+__pdoc__ = {
+ "accessibility": False,
+ "async_base": False,
+ "browser": False,
+ "browser_context": False,
+ "browser_type": False,
+ "cdp_session": False,
+ "chromium_browser_context": False,
+ "connection": False,
+ "console_message": False,
+ "dialog": False,
+ "download": False,
+ "element_handle": False,
+ "event_context_manager": False,
+ "file_chooser": False,
+ "frame": False,
+ "helper": False,
+ "impl_to_api_mapping": False,
+ "input": False,
+ "js_handle": False,
+ "main": False,
+ "network": False,
+ "object_factory": False,
+ "page": False,
+ "path_utils": False,
+ "playwright": False,
+ "selectors": False,
+ "sync_base": False,
+ "transport": False,
+ "wait_helper": False,
+ "async_playwright": False,
+ "sync_playwright": False,
+}
|
{"golden_diff": "diff --git a/playwright/__init__.py b/playwright/__init__.py\n--- a/playwright/__init__.py\n+++ b/playwright/__init__.py\n@@ -11,6 +11,13 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\"\"\"\n+Python package `playwright` is a Python library to automate Chromium,\n+Firefox and WebKit with a single API. Playwright is built to enable cross-browser\n+web automation that is ever-green, capable, reliable and fast.\n+For more information you'll find the documentation for the sync API [here](sync_api.html)\n+and for the async API [here](async_api.html).\n+\"\"\"\n \n import playwright.helper as helper\n from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager\n@@ -33,3 +40,37 @@\n \"Error\",\n \"TimeoutError\",\n ]\n+\n+__pdoc__ = {\n+ \"accessibility\": False,\n+ \"async_base\": False,\n+ \"browser\": False,\n+ \"browser_context\": False,\n+ \"browser_type\": False,\n+ \"cdp_session\": False,\n+ \"chromium_browser_context\": False,\n+ \"connection\": False,\n+ \"console_message\": False,\n+ \"dialog\": False,\n+ \"download\": False,\n+ \"element_handle\": False,\n+ \"event_context_manager\": False,\n+ \"file_chooser\": False,\n+ \"frame\": False,\n+ \"helper\": False,\n+ \"impl_to_api_mapping\": False,\n+ \"input\": False,\n+ \"js_handle\": False,\n+ \"main\": False,\n+ \"network\": False,\n+ \"object_factory\": False,\n+ \"page\": False,\n+ \"path_utils\": False,\n+ \"playwright\": False,\n+ \"selectors\": False,\n+ \"sync_base\": False,\n+ \"transport\": False,\n+ \"wait_helper\": False,\n+ \"async_playwright\": False,\n+ \"sync_playwright\": False,\n+}\n", "issue": "Internal docs are published on the gh pages instead of the official API\nWe should remove everything that is not relevant to the end user!\n\nDocs: https://microsoft.github.io/playwright-python/sync_api.html\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport playwright.helper as helper\nfrom playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager\n\nError = helper.Error\nTimeoutError = helper.TimeoutError\n\n\ndef async_playwright() -> AsyncPlaywrightContextManager:\n return AsyncPlaywrightContextManager()\n\n\ndef sync_playwright() -> SyncPlaywrightContextManager:\n return SyncPlaywrightContextManager()\n\n\n__all__ = [\n \"async_playwright\",\n \"sync_playwright\",\n \"Error\",\n \"TimeoutError\",\n]\n", "path": "playwright/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPython package `playwright` is a Python library to automate Chromium,\nFirefox and WebKit with a single API. Playwright is built to enable cross-browser\nweb automation that is ever-green, capable, reliable and fast.\nFor more information you'll find the documentation for the sync API [here](sync_api.html)\nand for the async API [here](async_api.html).\n\"\"\"\n\nimport playwright.helper as helper\nfrom playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager\n\nError = helper.Error\nTimeoutError = helper.TimeoutError\n\n\ndef async_playwright() -> AsyncPlaywrightContextManager:\n return AsyncPlaywrightContextManager()\n\n\ndef sync_playwright() -> SyncPlaywrightContextManager:\n return SyncPlaywrightContextManager()\n\n\n__all__ = [\n \"async_playwright\",\n \"sync_playwright\",\n \"Error\",\n \"TimeoutError\",\n]\n\n__pdoc__ = {\n \"accessibility\": False,\n \"async_base\": False,\n \"browser\": False,\n \"browser_context\": False,\n \"browser_type\": False,\n \"cdp_session\": False,\n \"chromium_browser_context\": False,\n \"connection\": False,\n \"console_message\": False,\n \"dialog\": False,\n \"download\": False,\n \"element_handle\": False,\n \"event_context_manager\": False,\n \"file_chooser\": False,\n \"frame\": False,\n \"helper\": False,\n \"impl_to_api_mapping\": False,\n \"input\": False,\n \"js_handle\": False,\n \"main\": False,\n \"network\": False,\n \"object_factory\": False,\n \"page\": False,\n \"path_utils\": False,\n \"playwright\": False,\n \"selectors\": False,\n \"sync_base\": False,\n \"transport\": False,\n \"wait_helper\": False,\n \"async_playwright\": False,\n \"sync_playwright\": False,\n}\n", "path": "playwright/__init__.py"}]}
| 602 | 467 |
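The fix above relies on pdoc's convention that a module-level `__pdoc__` mapping with `False` values hides those names from the generated pages; a minimal sketch (hidden module names are taken from the diff, the function body is a stub for illustration):

```python
"""Package docstring that pdoc renders as the index page."""

__all__ = ["sync_playwright"]

__pdoc__ = {
    "connection": False,  # internal module, omitted from the published docs
    "transport": False,   # internal module, omitted from the published docs
}

def sync_playwright():
    """Public entry point; this docstring stays in the rendered output."""
    return None
```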
gh_patches_debug_34896
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-2840
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace the custom Decimal scalar type with Float
Currently `graphene` uses a custom scalar type to represent an implementation detail of using a fixed-precision implementation for floating-point arithmetic.
This means the client (typically not written in Python) now has to deal with conversion to and from string. It also makes it hard to enforce strict type checking in TypeScript as there is no easy way to statically check whether a value is a valid decimal literal for Python.
I propose that we overload the builtin types to use `decimal.Decimal` in backend code and `Float!` in the GraphQL API.
--- END ISSUE ---
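A minimal sketch of the direction the issue proposes (assuming graphene 2.x): keep `decimal.Decimal` arithmetic inside resolvers and expose the field as a plain GraphQL `Float`, so clients never have to parse decimal strings.

```python
from decimal import Decimal
import graphene

class OrderEvent(graphene.ObjectType):
    amount = graphene.Float(description="Amount of money.")

    def resolve_amount(self, info):
        # Backend keeps fixed-precision math ...
        value = Decimal("10.50") * Decimal("0.23")
        # ... and converts only at the API boundary.
        return float(value)
```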
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/core/types/common.py`
Content:
```
1 import decimal
2
3 import graphene
4 from graphene.types import Scalar
5 from graphene_django import DjangoObjectType
6 from graphql.language import ast
7
8 from ....core import weight
9 from ..connection import CountableConnection
10
11
12 # FIXME: Decimal scalar implementation has been taken from Graphene repo.
13 # Remove it when a new version including this implementation is released.
14 class Decimal(Scalar):
15 """
16 The `Decimal` scalar type represents a python Decimal.
17 """
18
19 @staticmethod
20 def serialize(dec):
21 if isinstance(dec, str):
22 dec = decimal.Decimal(dec)
23 assert isinstance(dec, decimal.Decimal), 'Received not compatible Decimal "{}"'.format(
24 repr(dec))
25 return str(dec)
26
27 @classmethod
28 def parse_literal(cls, node):
29 if isinstance(node, ast.StringValue):
30 return cls.parse_value(node.value)
31
32 @staticmethod
33 def parse_value(value):
34 try:
35 return decimal.Decimal(value)
36 except decimal.DecimalException:
37 return None
38
39
40 class CountryDisplay(graphene.ObjectType):
41 code = graphene.String(description='Country code.', required=True)
42 country = graphene.String(description='Country.', required=True)
43
44
45 class CountableDjangoObjectType(DjangoObjectType):
46 class Meta:
47 abstract = True
48
49 @classmethod
50 def __init_subclass_with_meta__(cls, *args, **kwargs):
51 # Force it to use the countable connection
52 countable_conn = CountableConnection.create_type(
53 "{}CountableConnection".format(cls.__name__),
54 node=cls)
55 super().__init_subclass_with_meta__(
56 *args, connection=countable_conn, **kwargs)
57
58
59 class Error(graphene.ObjectType):
60 field = graphene.String(
61 description="""Name of a field that caused the error. A value of
62 `null` indicates that the error isn't associated with a particular
63 field.""", required=False)
64 message = graphene.String(description='The error message.')
65
66 class Meta:
67 description = 'Represents an error in the input of a mutation.'
68
69
70 class LanguageDisplay(graphene.ObjectType):
71 code = graphene.String(description='Language code.', required=True)
72 language = graphene.String(description='Language.', required=True)
73
74
75 class PermissionDisplay(graphene.ObjectType):
76 code = graphene.String(
77 description='Internal code for permission.', required=True)
78 name = graphene.String(
79 description='Describe action(s) allowed to do by permission.',
80 required=True)
81
82 class Meta:
83 description = 'Represents a permission object in a friendly form.'
84
85
86 class SeoInput(graphene.InputObjectType):
87 title = graphene.String(description='SEO title.')
88 description = graphene.String(description='SEO description.')
89
90
91 class Weight(graphene.ObjectType):
92 unit = graphene.String(description='Weight unit', required=True)
93 value = graphene.Float(description='Weight value', required=True)
94
95 class Meta:
96 description = 'Represents weight value in a specific weight unit.'
97
98
99 WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)
100
```
Path: `saleor/graphql/order/types.py`
Content:
```
1 import graphene
2 from graphene import relay
3
4 from ...order import OrderEvents, models
5 from ..account.types import User
6 from ..core.types.common import CountableDjangoObjectType
7 from ..core.types.money import Money, TaxedMoney
8 from decimal import Decimal
9
10 OrderEventsEnum = graphene.Enum.from_enum(OrderEvents)
11
12
13 class OrderEvent(CountableDjangoObjectType):
14 date = graphene.types.datetime.DateTime(
15 description='Date when event happened at in ISO 8601 format.')
16 type = OrderEventsEnum(description='Order event type')
17 user = graphene.Field(
18 User, id=graphene.Argument(graphene.ID),
19 description='User who performed the action.')
20 message = graphene.String(
21 description='Content of a note added to the order.')
22 email = graphene.String(description='Email of the customer')
23 email_type = graphene.String(
24 description='Type of an email sent to the customer')
25 amount = graphene.Float(description='Amount of money.')
26 quantity = graphene.Int(description='Number of items.')
27 composed_id = graphene.String(
28 description='Composed id of the Fulfillment.')
29
30 class Meta:
31 description = 'History log of the order.'
32 model = models.OrderEvent
33 interfaces = [relay.Node]
34 exclude_fields = ['order', 'parameters']
35
36 def resolve_email(self, info):
37 return self.parameters.get('email', None)
38
39 def resolve_email_type(self, info):
40 return self.parameters.get('email_type', None)
41
42 def resolve_amount(self, info):
43 amount = self.parameters.get('amount', None)
44 return Decimal(amount) if amount else None
45
46 def resolve_quantity(self, info):
47 quantity = self.parameters.get('quantity', None)
48 return int(quantity) if quantity else None
49
50 def resolve_message(self, info):
51 return self.parameters.get('message', None)
52
53 def resolve_composed_id(self, info):
54 return self.parameters.get('composed_id', None)
55
56
57 class Fulfillment(CountableDjangoObjectType):
58 status_display = graphene.String(
59 description='User-friendly fulfillment status.')
60
61 class Meta:
62 description = 'Represents order fulfillment.'
63 interfaces = [relay.Node]
64 model = models.Fulfillment
65 exclude_fields = ['order']
66
67 def resolve_status_display(self, info):
68 return self.get_status_display()
69
70
71 class FulfillmentLine(CountableDjangoObjectType):
72 class Meta:
73 description = 'Represents line of the fulfillment.'
74 interfaces = [relay.Node]
75 model = models.FulfillmentLine
76 exclude_fields = ['fulfillment']
77
78
79 class Order(CountableDjangoObjectType):
80 fulfillments = graphene.List(
81 Fulfillment,
82 required=True,
83 description='List of shipments for the order.')
84 is_paid = graphene.Boolean(
85 description='Informs if an order is fully paid.')
86 number = graphene.String(description='User-friendly number of an order.')
87 payment_status = graphene.String(description='Internal payment status.')
88 payment_status_display = graphene.String(
89 description='User-friendly payment status.')
90 subtotal = graphene.Field(
91 TaxedMoney,
92 description='The sum of line prices not including shipping.')
93 status_display = graphene.String(description='User-friendly order status.')
94 total_authorized = graphene.Field(
95 Money, description='Amount authorized for the order.')
96 total_captured = graphene.Field(
97 Money, description='Amount captured by payment.')
98 events = graphene.List(
99 OrderEvent,
100 description='List of events associated with the order.')
101
102 class Meta:
103 description = 'Represents an order in the shop.'
104 interfaces = [relay.Node]
105 model = models.Order
106 exclude_fields = [
107 'shipping_price_gross', 'shipping_price_net', 'total_gross',
108 'total_net']
109
110 @staticmethod
111 def resolve_subtotal(obj, info):
112 return obj.get_subtotal()
113
114 @staticmethod
115 def resolve_total_authorized(obj, info):
116 payment = obj.get_last_payment()
117 if payment:
118 return payment.get_total_price().gross
119
120 @staticmethod
121 def resolve_total_captured(obj, info):
122 payment = obj.get_last_payment()
123 if payment:
124 return payment.get_captured_price()
125
126 @staticmethod
127 def resolve_fulfillments(obj, info):
128 return obj.fulfillments.all()
129
130 @staticmethod
131 def resolve_events(obj, info):
132 return obj.events.all()
133
134 @staticmethod
135 def resolve_is_paid(obj, info):
136 return obj.is_fully_paid()
137
138 @staticmethod
139 def resolve_number(obj, info):
140 return str(obj.pk)
141
142 @staticmethod
143 def resolve_payment_status(obj, info):
144 return obj.get_last_payment_status()
145
146 @staticmethod
147 def resolve_payment_status_display(obj, info):
148 return obj.get_last_payment_status_display()
149
150 @staticmethod
151 def resolve_status_display(obj, info):
152 return obj.get_status_display()
153
154 @staticmethod
155 def resolve_user_email(obj, info):
156 if obj.user_email:
157 return obj.user_email
158 if obj.user_id:
159 return obj.user.email
160
161
162 class OrderLine(CountableDjangoObjectType):
163 class Meta:
164 description = 'Represents order line of particular order.'
165 model = models.OrderLine
166 interfaces = [relay.Node]
167 exclude_fields = [
168 'order', 'unit_price_gross', 'unit_price_net', 'variant']
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/graphql/core/types/common.py b/saleor/graphql/core/types/common.py
--- a/saleor/graphql/core/types/common.py
+++ b/saleor/graphql/core/types/common.py
@@ -1,37 +1,24 @@
import decimal
import graphene
-from graphene.types import Scalar
from graphene_django import DjangoObjectType
-from graphql.language import ast
from ....core import weight
from ..connection import CountableConnection
-# FIXME: Decimal scalar implementation has been taken from Graphene repo.
-# Remove it when a new version including this implementation is released.
-class Decimal(Scalar):
+class Decimal(graphene.Float):
+ """Custom Decimal implementation.
+ Returns Decimal as a float in the API,
+ parses float to the Decimal on the way back.
"""
- The `Decimal` scalar type represents a python Decimal.
- """
-
- @staticmethod
- def serialize(dec):
- if isinstance(dec, str):
- dec = decimal.Decimal(dec)
- assert isinstance(dec, decimal.Decimal), 'Received not compatible Decimal "{}"'.format(
- repr(dec))
- return str(dec)
-
- @classmethod
- def parse_literal(cls, node):
- if isinstance(node, ast.StringValue):
- return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
try:
+ # Converting the float to str before parsing it to Decimal is
+ # necessary to keep the decimal places as typed
+ value = str(value)
return decimal.Decimal(value)
except decimal.DecimalException:
return None
diff --git a/saleor/graphql/order/types.py b/saleor/graphql/order/types.py
--- a/saleor/graphql/order/types.py
+++ b/saleor/graphql/order/types.py
@@ -5,7 +5,6 @@
from ..account.types import User
from ..core.types.common import CountableDjangoObjectType
from ..core.types.money import Money, TaxedMoney
-from decimal import Decimal
OrderEventsEnum = graphene.Enum.from_enum(OrderEvents)
@@ -41,7 +40,7 @@
def resolve_amount(self, info):
amount = self.parameters.get('amount', None)
- return Decimal(amount) if amount else None
+ return float(amount) if amount else None
def resolve_quantity(self, info):
quantity = self.parameters.get('quantity', None)
|
{"golden_diff": "diff --git a/saleor/graphql/core/types/common.py b/saleor/graphql/core/types/common.py\n--- a/saleor/graphql/core/types/common.py\n+++ b/saleor/graphql/core/types/common.py\n@@ -1,37 +1,24 @@\n import decimal\n \n import graphene\n-from graphene.types import Scalar\n from graphene_django import DjangoObjectType\n-from graphql.language import ast\n \n from ....core import weight\n from ..connection import CountableConnection\n \n \n-# FIXME: Decimal scalar implementation has been taken from Graphene repo.\n-# Remove it when a new version including this implementation is released.\n-class Decimal(Scalar):\n+class Decimal(graphene.Float):\n+ \"\"\"Custom Decimal implementation.\n+ Returns Decimal as a float in the API,\n+ parses float to the Decimal on the way back.\n \"\"\"\n- The `Decimal` scalar type represents a python Decimal.\n- \"\"\"\n-\n- @staticmethod\n- def serialize(dec):\n- if isinstance(dec, str):\n- dec = decimal.Decimal(dec)\n- assert isinstance(dec, decimal.Decimal), 'Received not compatible Decimal \"{}\"'.format(\n- repr(dec))\n- return str(dec)\n-\n- @classmethod\n- def parse_literal(cls, node):\n- if isinstance(node, ast.StringValue):\n- return cls.parse_value(node.value)\n \n @staticmethod\n def parse_value(value):\n try:\n+ # Converting the float to str before parsing it to Decimal is\n+ # necessary to keep the decimal places as typed\n+ value = str(value)\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\ndiff --git a/saleor/graphql/order/types.py b/saleor/graphql/order/types.py\n--- a/saleor/graphql/order/types.py\n+++ b/saleor/graphql/order/types.py\n@@ -5,7 +5,6 @@\n from ..account.types import User\n from ..core.types.common import CountableDjangoObjectType\n from ..core.types.money import Money, TaxedMoney\n-from decimal import Decimal\n \n OrderEventsEnum = graphene.Enum.from_enum(OrderEvents)\n \n@@ -41,7 +40,7 @@\n \n def resolve_amount(self, info):\n amount = self.parameters.get('amount', None)\n- return Decimal(amount) if amount else None\n+ return float(amount) if amount else None\n \n def resolve_quantity(self, info):\n quantity = self.parameters.get('quantity', None)\n", "issue": "Replace the custom Decimal scalar type with Float\nCurrently `graphene` uses a custom scalar type to represent an implementation detail of using a fixed-precision implementation for floating-point arithmetic.\r\n\r\nThis means the client (typically not written in Python) now has to deal with conversion to and from string. 
It also makes it hard to enforce strict type checking in TypeScript as there is no easy way to statically check whether a value is a valid decimal literal for Python.\r\n\r\nI propose that we overload the builtin types to use `decimal.Decimal` in backend code and `Float!` in the GraphQL API.\n", "before_files": [{"content": "import decimal\n\nimport graphene\nfrom graphene.types import Scalar\nfrom graphene_django import DjangoObjectType\nfrom graphql.language import ast\n\nfrom ....core import weight\nfrom ..connection import CountableConnection\n\n\n# FIXME: Decimal scalar implementation has been taken from Graphene repo.\n# Remove it when a new version including this implementation is released.\nclass Decimal(Scalar):\n \"\"\"\n The `Decimal` scalar type represents a python Decimal.\n \"\"\"\n\n @staticmethod\n def serialize(dec):\n if isinstance(dec, str):\n dec = decimal.Decimal(dec)\n assert isinstance(dec, decimal.Decimal), 'Received not compatible Decimal \"{}\"'.format(\n repr(dec))\n return str(dec)\n\n @classmethod\n def parse_literal(cls, node):\n if isinstance(node, ast.StringValue):\n return cls.parse_value(node.value)\n\n @staticmethod\n def parse_value(value):\n try:\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n\n\nclass CountryDisplay(graphene.ObjectType):\n code = graphene.String(description='Country code.', required=True)\n country = graphene.String(description='Country.', required=True)\n\n\nclass CountableDjangoObjectType(DjangoObjectType):\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(cls, *args, **kwargs):\n # Force it to use the countable connection\n countable_conn = CountableConnection.create_type(\n \"{}CountableConnection\".format(cls.__name__),\n node=cls)\n super().__init_subclass_with_meta__(\n *args, connection=countable_conn, **kwargs)\n\n\nclass Error(graphene.ObjectType):\n field = graphene.String(\n description=\"\"\"Name of a field that caused the error. 
A value of\n `null` indicates that the error isn't associated with a particular\n field.\"\"\", required=False)\n message = graphene.String(description='The error message.')\n\n class Meta:\n description = 'Represents an error in the input of a mutation.'\n\n\nclass LanguageDisplay(graphene.ObjectType):\n code = graphene.String(description='Language code.', required=True)\n language = graphene.String(description='Language.', required=True)\n\n\nclass PermissionDisplay(graphene.ObjectType):\n code = graphene.String(\n description='Internal code for permission.', required=True)\n name = graphene.String(\n description='Describe action(s) allowed to do by permission.',\n required=True)\n\n class Meta:\n description = 'Represents a permission object in a friendly form.'\n\n\nclass SeoInput(graphene.InputObjectType):\n title = graphene.String(description='SEO title.')\n description = graphene.String(description='SEO description.')\n\n\nclass Weight(graphene.ObjectType):\n unit = graphene.String(description='Weight unit', required=True)\n value = graphene.Float(description='Weight value', required=True)\n\n class Meta:\n description = 'Represents weight value in a specific weight unit.'\n\n\nWeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n", "path": "saleor/graphql/core/types/common.py"}, {"content": "import graphene\nfrom graphene import relay\n\nfrom ...order import OrderEvents, models\nfrom ..account.types import User\nfrom ..core.types.common import CountableDjangoObjectType\nfrom ..core.types.money import Money, TaxedMoney\nfrom decimal import Decimal\n\nOrderEventsEnum = graphene.Enum.from_enum(OrderEvents)\n\n\nclass OrderEvent(CountableDjangoObjectType):\n date = graphene.types.datetime.DateTime(\n description='Date when event happened at in ISO 8601 format.')\n type = OrderEventsEnum(description='Order event type')\n user = graphene.Field(\n User, id=graphene.Argument(graphene.ID),\n description='User who performed the action.')\n message = graphene.String(\n description='Content of a note added to the order.')\n email = graphene.String(description='Email of the customer')\n email_type = graphene.String(\n description='Type of an email sent to the customer')\n amount = graphene.Float(description='Amount of money.')\n quantity = graphene.Int(description='Number of items.')\n composed_id = graphene.String(\n description='Composed id of the Fulfillment.')\n\n class Meta:\n description = 'History log of the order.'\n model = models.OrderEvent\n interfaces = [relay.Node]\n exclude_fields = ['order', 'parameters']\n\n def resolve_email(self, info):\n return self.parameters.get('email', None)\n\n def resolve_email_type(self, info):\n return self.parameters.get('email_type', None)\n\n def resolve_amount(self, info):\n amount = self.parameters.get('amount', None)\n return Decimal(amount) if amount else None\n\n def resolve_quantity(self, info):\n quantity = self.parameters.get('quantity', None)\n return int(quantity) if quantity else None\n\n def resolve_message(self, info):\n return self.parameters.get('message', None)\n\n def resolve_composed_id(self, info):\n return self.parameters.get('composed_id', None)\n\n\nclass Fulfillment(CountableDjangoObjectType):\n status_display = graphene.String(\n description='User-friendly fulfillment status.')\n\n class Meta:\n description = 'Represents order fulfillment.'\n interfaces = [relay.Node]\n model = models.Fulfillment\n exclude_fields = ['order']\n\n def resolve_status_display(self, info):\n return 
self.get_status_display()\n\n\nclass FulfillmentLine(CountableDjangoObjectType):\n class Meta:\n description = 'Represents line of the fulfillment.'\n interfaces = [relay.Node]\n model = models.FulfillmentLine\n exclude_fields = ['fulfillment']\n\n\nclass Order(CountableDjangoObjectType):\n fulfillments = graphene.List(\n Fulfillment,\n required=True,\n description='List of shipments for the order.')\n is_paid = graphene.Boolean(\n description='Informs if an order is fully paid.')\n number = graphene.String(description='User-friendly number of an order.')\n payment_status = graphene.String(description='Internal payment status.')\n payment_status_display = graphene.String(\n description='User-friendly payment status.')\n subtotal = graphene.Field(\n TaxedMoney,\n description='The sum of line prices not including shipping.')\n status_display = graphene.String(description='User-friendly order status.')\n total_authorized = graphene.Field(\n Money, description='Amount authorized for the order.')\n total_captured = graphene.Field(\n Money, description='Amount captured by payment.')\n events = graphene.List(\n OrderEvent,\n description='List of events associated with the order.')\n\n class Meta:\n description = 'Represents an order in the shop.'\n interfaces = [relay.Node]\n model = models.Order\n exclude_fields = [\n 'shipping_price_gross', 'shipping_price_net', 'total_gross',\n 'total_net']\n\n @staticmethod\n def resolve_subtotal(obj, info):\n return obj.get_subtotal()\n\n @staticmethod\n def resolve_total_authorized(obj, info):\n payment = obj.get_last_payment()\n if payment:\n return payment.get_total_price().gross\n\n @staticmethod\n def resolve_total_captured(obj, info):\n payment = obj.get_last_payment()\n if payment:\n return payment.get_captured_price()\n\n @staticmethod\n def resolve_fulfillments(obj, info):\n return obj.fulfillments.all()\n\n @staticmethod\n def resolve_events(obj, info):\n return obj.events.all()\n\n @staticmethod\n def resolve_is_paid(obj, info):\n return obj.is_fully_paid()\n\n @staticmethod\n def resolve_number(obj, info):\n return str(obj.pk)\n\n @staticmethod\n def resolve_payment_status(obj, info):\n return obj.get_last_payment_status()\n\n @staticmethod\n def resolve_payment_status_display(obj, info):\n return obj.get_last_payment_status_display()\n\n @staticmethod\n def resolve_status_display(obj, info):\n return obj.get_status_display()\n\n @staticmethod\n def resolve_user_email(obj, info):\n if obj.user_email:\n return obj.user_email\n if obj.user_id:\n return obj.user.email\n\n\nclass OrderLine(CountableDjangoObjectType):\n class Meta:\n description = 'Represents order line of particular order.'\n model = models.OrderLine\n interfaces = [relay.Node]\n exclude_fields = [\n 'order', 'unit_price_gross', 'unit_price_net', 'variant']\n", "path": "saleor/graphql/order/types.py"}], "after_files": [{"content": "import decimal\n\nimport graphene\nfrom graphene_django import DjangoObjectType\n\nfrom ....core import weight\nfrom ..connection import CountableConnection\n\n\nclass Decimal(graphene.Float):\n \"\"\"Custom Decimal implementation.\n Returns Decimal as a float in the API,\n parses float to the Decimal on the way back.\n \"\"\"\n\n @staticmethod\n def parse_value(value):\n try:\n # Converting the float to str before parsing it to Decimal is\n # necessary to keep the decimal places as typed\n value = str(value)\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n\n\nclass CountryDisplay(graphene.ObjectType):\n code = 
graphene.String(description='Country code.', required=True)\n country = graphene.String(description='Country.', required=True)\n\n\nclass CountableDjangoObjectType(DjangoObjectType):\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(cls, *args, **kwargs):\n # Force it to use the countable connection\n countable_conn = CountableConnection.create_type(\n \"{}CountableConnection\".format(cls.__name__),\n node=cls)\n super().__init_subclass_with_meta__(\n *args, connection=countable_conn, **kwargs)\n\n\nclass Error(graphene.ObjectType):\n field = graphene.String(\n description=\"\"\"Name of a field that caused the error. A value of\n `null` indicates that the error isn't associated with a particular\n field.\"\"\", required=False)\n message = graphene.String(description='The error message.')\n\n class Meta:\n description = 'Represents an error in the input of a mutation.'\n\n\nclass LanguageDisplay(graphene.ObjectType):\n code = graphene.String(description='Language code.', required=True)\n language = graphene.String(description='Language.', required=True)\n\n\nclass PermissionDisplay(graphene.ObjectType):\n code = graphene.String(\n description='Internal code for permission.', required=True)\n name = graphene.String(\n description='Describe action(s) allowed to do by permission.',\n required=True)\n\n class Meta:\n description = 'Represents a permission object in a friendly form.'\n\n\nclass SeoInput(graphene.InputObjectType):\n title = graphene.String(description='SEO title.')\n description = graphene.String(description='SEO description.')\n\n\nclass Weight(graphene.ObjectType):\n unit = graphene.String(description='Weight unit', required=True)\n value = graphene.Float(description='Weight value', required=True)\n\n class Meta:\n description = 'Represents weight value in a specific weight unit.'\n\n\nWeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n", "path": "saleor/graphql/core/types/common.py"}, {"content": "import graphene\nfrom graphene import relay\n\nfrom ...order import OrderEvents, models\nfrom ..account.types import User\nfrom ..core.types.common import CountableDjangoObjectType\nfrom ..core.types.money import Money, TaxedMoney\n\nOrderEventsEnum = graphene.Enum.from_enum(OrderEvents)\n\n\nclass OrderEvent(CountableDjangoObjectType):\n date = graphene.types.datetime.DateTime(\n description='Date when event happened at in ISO 8601 format.')\n type = OrderEventsEnum(description='Order event type')\n user = graphene.Field(\n User, id=graphene.Argument(graphene.ID),\n description='User who performed the action.')\n message = graphene.String(\n description='Content of a note added to the order.')\n email = graphene.String(description='Email of the customer')\n email_type = graphene.String(\n description='Type of an email sent to the customer')\n amount = graphene.Float(description='Amount of money.')\n quantity = graphene.Int(description='Number of items.')\n composed_id = graphene.String(\n description='Composed id of the Fulfillment.')\n\n class Meta:\n description = 'History log of the order.'\n model = models.OrderEvent\n interfaces = [relay.Node]\n exclude_fields = ['order', 'parameters']\n\n def resolve_email(self, info):\n return self.parameters.get('email', None)\n\n def resolve_email_type(self, info):\n return self.parameters.get('email_type', None)\n\n def resolve_amount(self, info):\n amount = self.parameters.get('amount', None)\n return float(amount) if amount else None\n\n def resolve_quantity(self, info):\n quantity = 
self.parameters.get('quantity', None)\n return int(quantity) if quantity else None\n\n def resolve_message(self, info):\n return self.parameters.get('message', None)\n\n def resolve_composed_id(self, info):\n return self.parameters.get('composed_id', None)\n\n\nclass Fulfillment(CountableDjangoObjectType):\n status_display = graphene.String(\n description='User-friendly fulfillment status.')\n\n class Meta:\n description = 'Represents order fulfillment.'\n interfaces = [relay.Node]\n model = models.Fulfillment\n exclude_fields = ['order']\n\n def resolve_status_display(self, info):\n return self.get_status_display()\n\n\nclass FulfillmentLine(CountableDjangoObjectType):\n class Meta:\n description = 'Represents line of the fulfillment.'\n interfaces = [relay.Node]\n model = models.FulfillmentLine\n exclude_fields = ['fulfillment']\n\n\nclass Order(CountableDjangoObjectType):\n fulfillments = graphene.List(\n Fulfillment,\n required=True,\n description='List of shipments for the order.')\n is_paid = graphene.Boolean(\n description='Informs if an order is fully paid.')\n number = graphene.String(description='User-friendly number of an order.')\n payment_status = graphene.String(description='Internal payment status.')\n payment_status_display = graphene.String(\n description='User-friendly payment status.')\n subtotal = graphene.Field(\n TaxedMoney,\n description='The sum of line prices not including shipping.')\n status_display = graphene.String(description='User-friendly order status.')\n total_authorized = graphene.Field(\n Money, description='Amount authorized for the order.')\n total_captured = graphene.Field(\n Money, description='Amount captured by payment.')\n events = graphene.List(\n OrderEvent,\n description='List of events associated with the order.')\n\n class Meta:\n description = 'Represents an order in the shop.'\n interfaces = [relay.Node]\n model = models.Order\n exclude_fields = [\n 'shipping_price_gross', 'shipping_price_net', 'total_gross',\n 'total_net']\n\n @staticmethod\n def resolve_subtotal(obj, info):\n return obj.get_subtotal()\n\n @staticmethod\n def resolve_total_authorized(obj, info):\n payment = obj.get_last_payment()\n if payment:\n return payment.get_total_price().gross\n\n @staticmethod\n def resolve_total_captured(obj, info):\n payment = obj.get_last_payment()\n if payment:\n return payment.get_captured_price()\n\n @staticmethod\n def resolve_fulfillments(obj, info):\n return obj.fulfillments.all()\n\n @staticmethod\n def resolve_events(obj, info):\n return obj.events.all()\n\n @staticmethod\n def resolve_is_paid(obj, info):\n return obj.is_fully_paid()\n\n @staticmethod\n def resolve_number(obj, info):\n return str(obj.pk)\n\n @staticmethod\n def resolve_payment_status(obj, info):\n return obj.get_last_payment_status()\n\n @staticmethod\n def resolve_payment_status_display(obj, info):\n return obj.get_last_payment_status_display()\n\n @staticmethod\n def resolve_status_display(obj, info):\n return obj.get_status_display()\n\n @staticmethod\n def resolve_user_email(obj, info):\n if obj.user_email:\n return obj.user_email\n if obj.user_id:\n return obj.user.email\n\n\nclass OrderLine(CountableDjangoObjectType):\n class Meta:\n description = 'Represents order line of particular order.'\n model = models.OrderLine\n interfaces = [relay.Node]\n exclude_fields = [\n 'order', 'unit_price_gross', 'unit_price_net', 'variant']\n", "path": "saleor/graphql/order/types.py"}]}
| 2,756 | 514 |
gh_patches_debug_6610 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3338 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider scooters_coffee is broken
During the global build at 2021-06-23-14-42-18, spider **scooters_coffee** failed with **324 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/scooters_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/scooters_coffee.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Mon': 'Mo', 'Tue': 'Tu',
11 'Wed': 'We', 'Thu': 'Th',
12 'Fri': 'Fr', 'Sat': 'Sa',
13 'Sun': 'Su'}
14
15
16 class ScootersCoffeeSpider(scrapy.Spider):
17 name = "scooters_coffee"
18 item_attributes = {'brand': "Scooter's Coffee"}
19 allowed_domains = ['code.metalocator.com']
20 download_delay = 0.5
21
22 def start_requests(self):
23 n = 327
24 for store_id in range(1, n+1):
25 url = f'https://code.metalocator.com/index.php?option=com_locator&view=location&tmpl=component&task=load&framed=1&sample_data=undefined&format=json&Itemid=12991&templ[]=item_address_template&lang=&_opt_out=&_urlparams=&distance=NaN&id={store_id}'
26
27 yield scrapy.Request(url=url, callback=self.parse)
28
29 def parse_hours(self, hours):
30 opening_hours = OpeningHours()
31
32 weekdays = re.findall(r'{(.*?)}', hours)
33 for weekday in weekdays:
34 day, open_close = weekday.split('|')
35 if open_close == 'C':
36 continue
37 else:
38 open_time, close_time = open_close.split('-')
39 opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')
40
41 return opening_hours.as_opening_hours()
42
43 def parse(self, response):
44 store_data = json.loads(response.text)[0]
45 name = store_data['name']
46 if '*permanently closed' in name.lower():
47 pass
48 else: # Gather the store details
49
50 properties = {
51 'ref': store_data['id'],
52 'name': store_data['name'].strip(' *COMING SOON'),
53 'addr_full': store_data['address'],
54 'city': store_data['city'],
55 'state': store_data['state'],
56 'postcode': store_data['postalcode'],
57 'country': store_data['country'],
58 'lat': store_data['lat'],
59 'lon': store_data['lng'],
60 'phone': store_data['phone'],
61 'website': response.url
62 }
63
64 hours = store_data.get('hours', '')
65 if hours and hours != '{Sun|C}{Mon|C}{Tue|C}{Wed|C}{Thu|C}{Fri|C}{Sat|C}':
66 store_hours = self.parse_hours(hours)
67 properties["opening_hours"] = store_hours
68
69 yield GeojsonPointItem(**properties)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/scooters_coffee.py b/locations/spiders/scooters_coffee.py
--- a/locations/spiders/scooters_coffee.py
+++ b/locations/spiders/scooters_coffee.py
@@ -35,6 +35,7 @@
if open_close == 'C':
continue
else:
+ open_close = open_close.replace(' ', '')
open_time, close_time = open_close.split('-')
opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')
|
{"golden_diff": "diff --git a/locations/spiders/scooters_coffee.py b/locations/spiders/scooters_coffee.py\n--- a/locations/spiders/scooters_coffee.py\n+++ b/locations/spiders/scooters_coffee.py\n@@ -35,6 +35,7 @@\n if open_close == 'C':\n continue\n else:\n+ open_close = open_close.replace(' ', '')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')\n", "issue": "Spider scooters_coffee is broken\nDuring the global build at 2021-06-23-14-42-18, spider **scooters_coffee** failed with **324 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/scooters_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\n\nclass ScootersCoffeeSpider(scrapy.Spider):\n name = \"scooters_coffee\"\n item_attributes = {'brand': \"Scooter's Coffee\"}\n allowed_domains = ['code.metalocator.com']\n download_delay = 0.5\n\n def start_requests(self):\n n = 327\n for store_id in range(1, n+1):\n url = f'https://code.metalocator.com/index.php?option=com_locator&view=location&tmpl=component&task=load&framed=1&sample_data=undefined&format=json&Itemid=12991&templ[]=item_address_template&lang=&_opt_out=&_urlparams=&distance=NaN&id={store_id}'\n \n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n weekdays = re.findall(r'{(.*?)}', hours)\n for weekday in weekdays:\n day, open_close = weekday.split('|')\n if open_close == 'C':\n continue\n else:\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n store_data = json.loads(response.text)[0]\n name = store_data['name']\n if '*permanently closed' in name.lower():\n pass\n else: # Gather the store details\n\n properties = {\n 'ref': store_data['id'],\n 'name': store_data['name'].strip(' *COMING SOON'),\n 'addr_full': store_data['address'],\n 'city': store_data['city'],\n 'state': store_data['state'],\n 'postcode': store_data['postalcode'],\n 'country': store_data['country'],\n 'lat': store_data['lat'],\n 'lon': store_data['lng'],\n 'phone': store_data['phone'],\n 'website': response.url\n }\n\n hours = store_data.get('hours', '')\n if hours and hours != '{Sun|C}{Mon|C}{Tue|C}{Wed|C}{Thu|C}{Fri|C}{Sat|C}':\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/scooters_coffee.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\n\nclass ScootersCoffeeSpider(scrapy.Spider):\n name = \"scooters_coffee\"\n 
item_attributes = {'brand': \"Scooter's Coffee\"}\n allowed_domains = ['code.metalocator.com']\n download_delay = 0.5\n\n def start_requests(self):\n n = 327\n for store_id in range(1, n+1):\n url = f'https://code.metalocator.com/index.php?option=com_locator&view=location&tmpl=component&task=load&framed=1&sample_data=undefined&format=json&Itemid=12991&templ[]=item_address_template&lang=&_opt_out=&_urlparams=&distance=NaN&id={store_id}'\n \n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n weekdays = re.findall(r'{(.*?)}', hours)\n for weekday in weekdays:\n day, open_close = weekday.split('|')\n if open_close == 'C':\n continue\n else:\n open_close = open_close.replace(' ', '')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n store_data = json.loads(response.text)[0]\n name = store_data['name']\n if '*permanently closed' in name.lower():\n pass\n else: # Gather the store details\n\n properties = {\n 'ref': store_data['id'],\n 'name': store_data['name'].strip(' *COMING SOON'),\n 'addr_full': store_data['address'],\n 'city': store_data['city'],\n 'state': store_data['state'],\n 'postcode': store_data['postalcode'],\n 'country': store_data['country'],\n 'lat': store_data['lat'],\n 'lon': store_data['lng'],\n 'phone': store_data['phone'],\n 'website': response.url\n }\n\n hours = store_data.get('hours', '')\n if hours and hours != '{Sun|C}{Mon|C}{Tue|C}{Wed|C}{Thu|C}{Fri|C}{Sat|C}':\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/scooters_coffee.py"}]}
| 1,199 | 132 |
gh_patches_debug_5979
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-1994
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nikola no longer has all-nocdn.*
I started using `nikola` today for a gallery. The generated pages make reference to `all-nocdn.css` and `all-nocdn.js`, but they're not (longer?) part of `nikola`'s code:
``` bash
mdione@diablo:~/src/projects/nikola$ git remote show origin
* remote origin
Fetch URL: https://github.com/getnikola/nikola.git
Push URL: https://github.com/getnikola/nikola.git
HEAD branch: master
mdione@diablo:~/src/projects/nikola$ git branch
* master
mdione@diablo:~/src/projects/nikola$ git pull
Already up-to-date.
mdione@diablo:~/src/projects/nikola$ find . -name all-nocdn.*
mdione@diablo:~/src/projects/nikola$
```
I just copied those files from and old attempt to use `nikola` and the site works. This happens with both versions `7.6.0-3` from Debian and from `master`, as you can see above.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/bundles.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2015 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Bundle assets using WebAssets."""
28
29 from __future__ import unicode_literals
30
31 import os
32
33 try:
34 import webassets
35 except ImportError:
36 webassets = None # NOQA
37
38 from nikola.plugin_categories import LateTask
39 from nikola import utils
40
41
42 class BuildBundles(LateTask):
43
44 """Bundle assets using WebAssets."""
45
46 name = "create_bundles"
47
48 def set_site(self, site):
49 """Set Nikola site."""
50 self.logger = utils.get_logger('bundles', utils.STDERR_HANDLER)
51 if webassets is None and site.config['USE_BUNDLES']:
52 utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)
53 self.logger.warn('Setting USE_BUNDLES to False.')
54 site.config['USE_BUNDLES'] = False
55 super(BuildBundles, self).set_site(site)
56
57 def gen_tasks(self):
58 """Bundle assets using WebAssets."""
59 kw = {
60 'filters': self.site.config['FILTERS'],
61 'output_folder': self.site.config['OUTPUT_FOLDER'],
62 'cache_folder': self.site.config['CACHE_FOLDER'],
63 'theme_bundles': get_theme_bundles(self.site.THEMES),
64 'themes': self.site.THEMES,
65 'files_folders': self.site.config['FILES_FOLDERS'],
66 'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],
67 }
68
69 def build_bundle(output, inputs):
70 out_dir = os.path.join(kw['output_folder'],
71 os.path.dirname(output))
72 inputs = [os.path.relpath(i, out_dir) for i in inputs if os.path.isfile(i)]
73 cache_dir = os.path.join(kw['cache_folder'], 'webassets')
74 utils.makedirs(cache_dir)
75 env = webassets.Environment(out_dir, os.path.dirname(output),
76 cache=cache_dir)
77 if inputs:
78 bundle = webassets.Bundle(*inputs, output=os.path.basename(output))
79 env.register(output, bundle)
80 # This generates the file
81 try:
82 env[output].urls()
83 except Exception as e:
84 self.logger.error("Failed to build bundles.")
85 self.logger.exception(e)
86 self.logger.notice("Try running ``nikola clean`` and building again.")
87 else:
88 with open(os.path.join(out_dir, os.path.basename(output)), 'wb+'):
89 pass # Create empty file
90
91 yield self.group_task()
92 if (webassets is not None and self.site.config['USE_BUNDLES'] is not
93 False):
94 for name, _files in kw['theme_bundles'].items():
95 output_path = os.path.join(kw['output_folder'], name)
96 dname = os.path.dirname(name)
97 files = []
98 for fname in _files:
99 # paths are relative to dirname
100 files.append(os.path.join(dname, fname))
101 file_dep = [os.path.join(kw['output_folder'], fname)
102 for fname in files if
103 utils.get_asset_path(fname, self.site.THEMES, self.site.config['FILES_FOLDERS']) or fname == os.path.join('assets', 'css', 'code.css')]
104 # code.css will be generated by us if it does not exist in
105 # FILES_FOLDERS or theme assets. It is guaranteed that the
106 # generation will happen before this task.
107 task = {
108 'file_dep': list(file_dep),
109 'task_dep': ['copy_assets', 'copy_files'],
110 'basename': str(self.name),
111 'name': str(output_path),
112 'actions': [(build_bundle, (name, file_dep))],
113 'targets': [output_path],
114 'uptodate': [
115 utils.config_changed({
116 1: kw,
117 2: file_dep
118 }, 'nikola.plugins.task.bundles')],
119 'clean': True,
120 }
121 yield utils.apply_filters(task, kw['filters'])
122
123
124 def get_theme_bundles(themes):
125 """Given a theme chain, return the bundle definitions."""
126 bundles = {}
127 for theme_name in themes:
128 bundles_path = os.path.join(
129 utils.get_theme_path(theme_name), 'bundles')
130 if os.path.isfile(bundles_path):
131 with open(bundles_path) as fd:
132 for line in fd:
133 try:
134 name, files = line.split('=')
135 files = [f.strip() for f in files.split(',')]
136 bundles[name.strip().replace('/', os.sep)] = files
137 except ValueError:
138 # for empty lines
139 pass
140 break
141 return bundles
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/task/bundles.py b/nikola/plugins/task/bundles.py
--- a/nikola/plugins/task/bundles.py
+++ b/nikola/plugins/task/bundles.py
@@ -52,6 +52,7 @@
utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)
self.logger.warn('Setting USE_BUNDLES to False.')
site.config['USE_BUNDLES'] = False
+ site._GLOBAL_CONTEXT['use_bundles'] = False
super(BuildBundles, self).set_site(site)
def gen_tasks(self):
|
{"golden_diff": "diff --git a/nikola/plugins/task/bundles.py b/nikola/plugins/task/bundles.py\n--- a/nikola/plugins/task/bundles.py\n+++ b/nikola/plugins/task/bundles.py\n@@ -52,6 +52,7 @@\n utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)\n self.logger.warn('Setting USE_BUNDLES to False.')\n site.config['USE_BUNDLES'] = False\n+ site._GLOBAL_CONTEXT['use_bundles'] = False\n super(BuildBundles, self).set_site(site)\n \n def gen_tasks(self):\n", "issue": "nikola no longer has all-nocdn.*\nI started using `nikola` today for a gallery. The generated pages make reference to `all-nocdn.css` and `all-nocdn.js`, but they're not (longer?) part of `nikola`'s code:\n\n``` bash\nmdione@diablo:~/src/projects/nikola$ git remote show origin\n* remote origin\n Fetch URL: https://github.com/getnikola/nikola.git\n Push URL: https://github.com/getnikola/nikola.git\n HEAD branch: master\nmdione@diablo:~/src/projects/nikola$ git branch\n* master\nmdione@diablo:~/src/projects/nikola$ git pull\nAlready up-to-date.\nmdione@diablo:~/src/projects/nikola$ find . -name all-nocdn.*\nmdione@diablo:~/src/projects/nikola$\n```\n\nI just copied those files from and old attempt to use `nikola` and the site works. This happens with both versions `7.6.0-3` from Debian and from `master`, as you can see above.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Bundle assets using WebAssets.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\ntry:\n import webassets\nexcept ImportError:\n webassets = None # NOQA\n\nfrom nikola.plugin_categories import LateTask\nfrom nikola import utils\n\n\nclass BuildBundles(LateTask):\n\n \"\"\"Bundle assets using WebAssets.\"\"\"\n\n name = \"create_bundles\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.logger = utils.get_logger('bundles', utils.STDERR_HANDLER)\n if webassets is None and site.config['USE_BUNDLES']:\n utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)\n self.logger.warn('Setting USE_BUNDLES to False.')\n site.config['USE_BUNDLES'] = False\n super(BuildBundles, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Bundle assets using WebAssets.\"\"\"\n kw = {\n 'filters': self.site.config['FILTERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'cache_folder': self.site.config['CACHE_FOLDER'],\n 'theme_bundles': get_theme_bundles(self.site.THEMES),\n 'themes': self.site.THEMES,\n 'files_folders': self.site.config['FILES_FOLDERS'],\n 'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],\n }\n\n def build_bundle(output, inputs):\n out_dir = os.path.join(kw['output_folder'],\n os.path.dirname(output))\n inputs = [os.path.relpath(i, out_dir) for i in inputs if os.path.isfile(i)]\n cache_dir = os.path.join(kw['cache_folder'], 'webassets')\n utils.makedirs(cache_dir)\n env = webassets.Environment(out_dir, os.path.dirname(output),\n cache=cache_dir)\n if inputs:\n bundle = webassets.Bundle(*inputs, output=os.path.basename(output))\n env.register(output, bundle)\n # This generates the file\n try:\n env[output].urls()\n except Exception as e:\n self.logger.error(\"Failed to build bundles.\")\n self.logger.exception(e)\n self.logger.notice(\"Try running ``nikola clean`` and building again.\")\n else:\n with open(os.path.join(out_dir, os.path.basename(output)), 'wb+'):\n pass # Create empty file\n\n yield self.group_task()\n if (webassets is not None and self.site.config['USE_BUNDLES'] is not\n False):\n for name, _files in kw['theme_bundles'].items():\n output_path = os.path.join(kw['output_folder'], name)\n dname = os.path.dirname(name)\n files = []\n for fname in _files:\n # paths are relative to dirname\n files.append(os.path.join(dname, fname))\n file_dep = [os.path.join(kw['output_folder'], fname)\n for fname in files if\n utils.get_asset_path(fname, self.site.THEMES, self.site.config['FILES_FOLDERS']) or fname == os.path.join('assets', 'css', 'code.css')]\n # code.css will be generated by us if it does not exist in\n # FILES_FOLDERS or theme assets. 
It is guaranteed that the\n # generation will happen before this task.\n task = {\n 'file_dep': list(file_dep),\n 'task_dep': ['copy_assets', 'copy_files'],\n 'basename': str(self.name),\n 'name': str(output_path),\n 'actions': [(build_bundle, (name, file_dep))],\n 'targets': [output_path],\n 'uptodate': [\n utils.config_changed({\n 1: kw,\n 2: file_dep\n }, 'nikola.plugins.task.bundles')],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n\n\ndef get_theme_bundles(themes):\n \"\"\"Given a theme chain, return the bundle definitions.\"\"\"\n bundles = {}\n for theme_name in themes:\n bundles_path = os.path.join(\n utils.get_theme_path(theme_name), 'bundles')\n if os.path.isfile(bundles_path):\n with open(bundles_path) as fd:\n for line in fd:\n try:\n name, files = line.split('=')\n files = [f.strip() for f in files.split(',')]\n bundles[name.strip().replace('/', os.sep)] = files\n except ValueError:\n # for empty lines\n pass\n break\n return bundles\n", "path": "nikola/plugins/task/bundles.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Bundle assets using WebAssets.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\ntry:\n import webassets\nexcept ImportError:\n webassets = None # NOQA\n\nfrom nikola.plugin_categories import LateTask\nfrom nikola import utils\n\n\nclass BuildBundles(LateTask):\n\n \"\"\"Bundle assets using WebAssets.\"\"\"\n\n name = \"create_bundles\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.logger = utils.get_logger('bundles', utils.STDERR_HANDLER)\n if webassets is None and site.config['USE_BUNDLES']:\n utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)\n self.logger.warn('Setting USE_BUNDLES to False.')\n site.config['USE_BUNDLES'] = False\n site._GLOBAL_CONTEXT['use_bundles'] = False\n super(BuildBundles, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Bundle assets using WebAssets.\"\"\"\n kw = {\n 'filters': self.site.config['FILTERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'cache_folder': self.site.config['CACHE_FOLDER'],\n 'theme_bundles': get_theme_bundles(self.site.THEMES),\n 'themes': self.site.THEMES,\n 'files_folders': self.site.config['FILES_FOLDERS'],\n 'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],\n }\n\n def build_bundle(output, inputs):\n out_dir = os.path.join(kw['output_folder'],\n os.path.dirname(output))\n inputs = [os.path.relpath(i, out_dir) for i in inputs if os.path.isfile(i)]\n cache_dir = os.path.join(kw['cache_folder'], 'webassets')\n utils.makedirs(cache_dir)\n env = webassets.Environment(out_dir, os.path.dirname(output),\n cache=cache_dir)\n if inputs:\n bundle = webassets.Bundle(*inputs, output=os.path.basename(output))\n env.register(output, bundle)\n # This generates the file\n try:\n env[output].urls()\n except Exception as e:\n self.logger.error(\"Failed to build bundles.\")\n self.logger.exception(e)\n self.logger.notice(\"Try running ``nikola clean`` and building again.\")\n else:\n with open(os.path.join(out_dir, os.path.basename(output)), 'wb+'):\n pass # Create empty file\n\n yield self.group_task()\n if (webassets is not None and self.site.config['USE_BUNDLES'] is not\n False):\n for name, _files in kw['theme_bundles'].items():\n output_path = os.path.join(kw['output_folder'], name)\n dname = os.path.dirname(name)\n files = []\n for fname in _files:\n # paths are relative to dirname\n files.append(os.path.join(dname, fname))\n file_dep = [os.path.join(kw['output_folder'], fname)\n for fname in files if\n utils.get_asset_path(fname, self.site.THEMES, self.site.config['FILES_FOLDERS']) or fname == os.path.join('assets', 'css', 'code.css')]\n # code.css will be generated by us if it does not exist in\n # FILES_FOLDERS or theme assets. 
It is guaranteed that the\n # generation will happen before this task.\n task = {\n 'file_dep': list(file_dep),\n 'task_dep': ['copy_assets', 'copy_files'],\n 'basename': str(self.name),\n 'name': str(output_path),\n 'actions': [(build_bundle, (name, file_dep))],\n 'targets': [output_path],\n 'uptodate': [\n utils.config_changed({\n 1: kw,\n 2: file_dep\n }, 'nikola.plugins.task.bundles')],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n\n\ndef get_theme_bundles(themes):\n \"\"\"Given a theme chain, return the bundle definitions.\"\"\"\n bundles = {}\n for theme_name in themes:\n bundles_path = os.path.join(\n utils.get_theme_path(theme_name), 'bundles')\n if os.path.isfile(bundles_path):\n with open(bundles_path) as fd:\n for line in fd:\n try:\n name, files = line.split('=')\n files = [f.strip() for f in files.split(',')]\n bundles[name.strip().replace('/', os.sep)] = files\n except ValueError:\n # for empty lines\n pass\n break\n return bundles\n", "path": "nikola/plugins/task/bundles.py"}]}
| 2,043 | 131 |
gh_patches_debug_40591 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4802 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/inference/tensor_parallel/policies/llama.py`
Content:
```
1 from functools import partial
2
3 import torch
4 from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm
5
6 from colossalai.shardformer.layer import VocabParallelEmbedding1D
7 from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
8 # import colossalai
9 from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy
10
11 from ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward
12
13 try:
14 from colossalai.kernel.triton import rmsnorm_forward
15
16 HAS_TRITON_RMSNORM = True
17 except:
18 print("you should install triton from https://github.com/openai/triton")
19 HAS_TRITON_RMSNORM = False
20
21
22 def get_triton_rmsnorm_forward():
23 if HAS_TRITON_RMSNORM:
24
25 def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
26 return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)
27
28 return _triton_rmsnorm_forward
29 else:
30 return None
31
32
33 class LlamaModelInferPolicy(LlamaForCausalLMPolicy):
34 def __init__(self) -> None:
35 super().__init__()
36
37 def module_policy(self):
38 policy = super().module_policy()
39
40 if self.shard_config.inference_gptq:
41 from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear
42
43 decoder_attribute_replacement = {
44 "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
45 "self_attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
46 }
47 policy[LlamaDecoderLayer] = ModulePolicyDescription(
48 attribute_replacement=decoder_attribute_replacement,
49 sub_module_replacement=[
50 SubModuleReplacementDescription(
51 suffix="self_attn.q_proj",
52 target_module=ColCaiQuantLinear,
53 kwargs={'split_num': 1},
54 ),
55 SubModuleReplacementDescription(
56 suffix="self_attn.k_proj",
57 target_module=ColCaiQuantLinear,
58 kwargs={'split_num': 1},
59 ),
60 SubModuleReplacementDescription(
61 suffix="self_attn.v_proj",
62 target_module=ColCaiQuantLinear,
63 kwargs={'split_num': 1},
64 ),
65 SubModuleReplacementDescription(
66 suffix="self_attn.o_proj",
67 target_module=RowCaiQuantLinear,
68 kwargs={'split_num': 1},
69 ),
70 SubModuleReplacementDescription(
71 suffix="mlp.gate_proj",
72 target_module=ColCaiQuantLinear,
73 kwargs={'split_num': 1},
74 ),
75 SubModuleReplacementDescription(
76 suffix="mlp.up_proj",
77 target_module=ColCaiQuantLinear,
78 kwargs={'split_num': 1},
79 ),
80 SubModuleReplacementDescription(
81 suffix="mlp.down_proj",
82 target_module=RowCaiQuantLinear,
83 kwargs={'split_num': 1},
84 )
85 ],
86 )
87
88 self.shard_config._infer()
89
90 infer_forward = LlamaInferenceForwards.llama_model_forward
91 method_replacement = {"forward": partial(infer_forward)}
92 self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)
93
94 infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward
95 method_replacement = {"forward": partial(infer_forward)}
96 self.append_or_create_method_replacement(
97 description=method_replacement, policy=policy, target_key=LlamaDecoderLayer
98 )
99
100 infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward
101 method_replacement = {"forward": partial(infer_forward)}
102 self.append_or_create_method_replacement(
103 description=method_replacement, policy=policy, target_key=LlamaAttention
104 )
105
106 infer_forward = None
107 if HAS_TRITON_RMSNORM:
108 infer_forward = get_triton_rmsnorm_forward()
109 else:
110 # NOTE: adding rms_norm from cuda kernels caused precision issue, fix @tiandiao123
111 infer_forward = get_llama_vllm_rmsnorm_forward()
112
113 if infer_forward is not None:
114 method_replacement = {"forward": partial(infer_forward)}
115 self.append_or_create_method_replacement(
116 description=method_replacement, policy=policy, target_key=LlamaRMSNorm
117 )
118
119 return policy
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/inference/tensor_parallel/policies/llama.py b/colossalai/inference/tensor_parallel/policies/llama.py
--- a/colossalai/inference/tensor_parallel/policies/llama.py
+++ b/colossalai/inference/tensor_parallel/policies/llama.py
@@ -3,8 +3,8 @@
import torch
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm
-from colossalai.shardformer.layer import VocabParallelEmbedding1D
-from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
+from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
+
# import colossalai
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy
@@ -50,38 +50,38 @@
SubModuleReplacementDescription(
suffix="self_attn.q_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.k_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.v_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.o_proj",
target_module=RowCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.gate_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.up_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.down_proj",
target_module=RowCaiQuantLinear,
- kwargs={'split_num': 1},
- )
+ kwargs={"split_num": 1},
+ ),
],
)
|
{"golden_diff": "diff --git a/colossalai/inference/tensor_parallel/policies/llama.py b/colossalai/inference/tensor_parallel/policies/llama.py\n--- a/colossalai/inference/tensor_parallel/policies/llama.py\n+++ b/colossalai/inference/tensor_parallel/policies/llama.py\n@@ -3,8 +3,8 @@\n import torch\n from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm\n \n-from colossalai.shardformer.layer import VocabParallelEmbedding1D\n-from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription\n+from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n+\n # import colossalai\n from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n \n@@ -50,38 +50,38 @@\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n- kwargs={'split_num': 1},\n- )\n+ kwargs={\"split_num\": 1},\n+ ),\n ],\n )\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from functools import partial\n\nimport torch\nfrom transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm\n\nfrom colossalai.shardformer.layer import VocabParallelEmbedding1D\nfrom colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription\n# import colossalai\nfrom colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n\nfrom ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward\n\ntry:\n from colossalai.kernel.triton import rmsnorm_forward\n\n HAS_TRITON_RMSNORM = True\nexcept:\n print(\"you should install triton from https://github.com/openai/triton\")\n HAS_TRITON_RMSNORM = False\n\n\ndef get_triton_rmsnorm_forward():\n if HAS_TRITON_RMSNORM:\n\n def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):\n return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n\n return _triton_rmsnorm_forward\n else:\n return None\n\n\nclass LlamaModelInferPolicy(LlamaForCausalLMPolicy):\n def __init__(self) -> None:\n super().__init__()\n\n def module_policy(self):\n policy = super().module_policy()\n\n if self.shard_config.inference_gptq:\n from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear\n\n decoder_attribute_replacement = {\n \"self_attn.hidden_size\": 
self.model.config.hidden_size // self.shard_config.tensor_parallel_size,\n \"self_attn.num_heads\": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,\n }\n policy[LlamaDecoderLayer] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={'split_num': 1},\n )\n ],\n )\n\n self.shard_config._infer()\n\n infer_forward = LlamaInferenceForwards.llama_model_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n\n infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n )\n\n infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaAttention\n )\n\n infer_forward = None\n if HAS_TRITON_RMSNORM:\n infer_forward = get_triton_rmsnorm_forward()\n else:\n # NOTE: adding rms_norm from cuda kernels caused precision issue, fix @tiandiao123\n infer_forward = get_llama_vllm_rmsnorm_forward()\n\n if infer_forward is not None:\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaRMSNorm\n )\n\n return policy\n", "path": "colossalai/inference/tensor_parallel/policies/llama.py"}], "after_files": [{"content": "from functools import partial\n\nimport torch\nfrom transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm\n\nfrom colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n\n# import colossalai\nfrom colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n\nfrom ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward\n\ntry:\n from colossalai.kernel.triton import rmsnorm_forward\n\n HAS_TRITON_RMSNORM = True\nexcept:\n print(\"you should install triton from https://github.com/openai/triton\")\n HAS_TRITON_RMSNORM = False\n\n\ndef get_triton_rmsnorm_forward():\n if HAS_TRITON_RMSNORM:\n\n def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):\n return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n\n return _triton_rmsnorm_forward\n else:\n 
return None\n\n\nclass LlamaModelInferPolicy(LlamaForCausalLMPolicy):\n def __init__(self) -> None:\n super().__init__()\n\n def module_policy(self):\n policy = super().module_policy()\n\n if self.shard_config.inference_gptq:\n from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear\n\n decoder_attribute_replacement = {\n \"self_attn.hidden_size\": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,\n \"self_attn.num_heads\": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,\n }\n policy[LlamaDecoderLayer] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n ],\n )\n\n self.shard_config._infer()\n\n infer_forward = LlamaInferenceForwards.llama_model_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n\n infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n )\n\n infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaAttention\n )\n\n infer_forward = None\n if HAS_TRITON_RMSNORM:\n infer_forward = get_triton_rmsnorm_forward()\n else:\n # NOTE: adding rms_norm from cuda kernels caused precision issue, fix @tiandiao123\n infer_forward = get_llama_vllm_rmsnorm_forward()\n\n if infer_forward is not None:\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaRMSNorm\n )\n\n return policy\n", "path": "colossalai/inference/tensor_parallel/policies/llama.py"}]}
| 1,546 | 548 |
gh_patches_debug_32000
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-875
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Thumbnail endpoint returns 500 if the upstream image URL 404s
## Description
<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->
<!-- For example: "I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page." -->
I was looking through production logs to see where some 500 errors were coming from and I found a handful of logs that look like this:
Field | Value
-- | --
`@ingestionTime` | 1659625225674
`@log` | 140733703834:/prod/api/nginx
`@logStream` | i-05a7072edd44e97d9
`@timestamp` | 1659625221900
body_bytes_sent | 66
host_header | api.openverse.engineering
http_referrer | https://search.openverse.engineering/
http_user_agent | Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)
http_x_forwarded_for | [redacted]
remote_addr | [redacted]
request | GET /v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/ HTTP/1.1
request_time | 1.175
status | 500
time_local | 04/Aug/2022:15:00:21 +0000
upstream_response_time | 1.175
Indeed, if you visit `https://api.openverse.engineering/v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/` you will get a 500 back with a message saying that the upstream request resulted in a 404. I think these should just 404 all the way, instead of resulting in a 500.
I wonder if these should also be marked as dead links? It should already be anyway because the upstream url 404s and this is how we detect dead links during the search request, but clearly some still get through :thinking:
## Reproduction
<!-- Provide detailed steps to reproduce the bug. -->
1. <!-- Step 1 ... --> Visit https://api.openverse.engineering/v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/
2. <!-- Step 2 ... --> See the 500 response code with the 404 body details.
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/views/media_views.py`
Content:
```
1 import json
2 import logging as log
3 from http.client import RemoteDisconnected
4 from urllib.error import HTTPError
5 from urllib.parse import urlencode
6 from urllib.request import Request, urlopen
7
8 from django.conf import settings
9 from django.http.response import HttpResponse
10 from rest_framework import status
11 from rest_framework.decorators import action
12 from rest_framework.response import Response
13 from rest_framework.viewsets import ReadOnlyModelViewSet
14
15 from catalog.api.controllers import search_controller
16 from catalog.api.models import ContentProvider
17 from catalog.api.serializers.provider_serializers import ProviderSerializer
18 from catalog.api.utils.exceptions import get_api_exception
19 from catalog.api.utils.pagination import StandardPagination
20 from catalog.custom_auto_schema import CustomAutoSchema
21
22
23 class MediaViewSet(ReadOnlyModelViewSet):
24 swagger_schema = CustomAutoSchema
25
26 lookup_field = "identifier"
27 # TODO: https://github.com/encode/django-rest-framework/pull/6789
28 lookup_value_regex = (
29 r"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}"
30 )
31
32 pagination_class = StandardPagination
33
34 # Populate these in the corresponding subclass
35 model_class = None
36 query_serializer_class = None
37 default_index = None
38 qa_index = None
39
40 def __init__(self, *args, **kwargs):
41 super().__init__(*args, **kwargs)
42 required_fields = [
43 self.model_class,
44 self.query_serializer_class,
45 self.default_index,
46 self.qa_index,
47 ]
48 if any(val is None for val in required_fields):
49 msg = "Viewset fields are not completely populated."
50 raise ValueError(msg)
51
52 def get_queryset(self):
53 return self.model_class.objects.all()
54
55 # Standard actions
56
57 def list(self, request, *_, **__):
58 self.paginator.page_size = request.query_params.get("page_size")
59 page_size = self.paginator.page_size
60 self.paginator.page = request.query_params.get("page")
61 page = self.paginator.page
62
63 params = self.query_serializer_class(
64 data=request.query_params, context={"request": request}
65 )
66 params.is_valid(raise_exception=True)
67
68 hashed_ip = hash(self._get_user_ip(request))
69 qa = params.validated_data["qa"]
70 filter_dead = params.validated_data["filter_dead"]
71
72 search_index = self.qa_index if qa else self.default_index
73 try:
74 results, num_pages, num_results = search_controller.search(
75 params,
76 search_index,
77 page_size,
78 hashed_ip,
79 request,
80 filter_dead,
81 page,
82 )
83 self.paginator.page_count = num_pages
84 self.paginator.result_count = num_results
85 except ValueError as e:
86 raise get_api_exception(getattr(e, "message", str(e)))
87
88 serializer = self.get_serializer(results, many=True)
89 return self.get_paginated_response(serializer.data)
90
91 # Extra actions
92
93 @action(detail=False, serializer_class=ProviderSerializer, pagination_class=None)
94 def stats(self, *_, **__):
95 source_counts = search_controller.get_sources(self.default_index)
96 context = self.get_serializer_context() | {
97 "source_counts": source_counts,
98 }
99
100 providers = ContentProvider.objects.filter(
101 media_type=self.default_index, filter_content=False
102 )
103 serializer = self.get_serializer(providers, many=True, context=context)
104 return Response(serializer.data)
105
106 @action(detail=True)
107 def related(self, request, identifier=None, *_, **__):
108 try:
109 results, num_results = search_controller.related_media(
110 uuid=identifier,
111 index=self.default_index,
112 request=request,
113 filter_dead=True,
114 )
115 self.paginator.result_count = num_results
116 self.paginator.page_count = 1
117 # `page_size` refers to the maximum number of related images to return.
118 self.paginator.page_size = 10
119 except ValueError as e:
120 raise get_api_exception(getattr(e, "message", str(e)))
121 # If there are no hits in the search controller
122 except IndexError:
123 raise get_api_exception("Could not find items.", 404)
124
125 serializer = self.get_serializer(results, many=True)
126 return self.get_paginated_response(serializer.data)
127
128 def report(self, request, *_, **__):
129 media = self.get_object()
130 identifier = media.identifier
131 serializer = self.get_serializer(data=request.data)
132 if not serializer.is_valid():
133 raise get_api_exception("Invalid input.", 400)
134 report = serializer.save(identifier=identifier)
135
136 serializer = self.get_serializer(report)
137 return Response(data=serializer.data, status=status.HTTP_201_CREATED)
138
139 def thumbnail(self, image_url, request, *_, **__):
140 serializer = self.get_serializer(data=request.query_params)
141 serializer.is_valid(raise_exception=True)
142 return self._get_proxied_image(
143 image_url,
144 accept_header=request.headers.get("Accept", "image/*"),
145 **serializer.validated_data,
146 )
147
148 # Helper functions
149
150 @staticmethod
151 def _get_user_ip(request):
152 """
153 Read request headers to find the correct IP address.
154 It is assumed that X-Forwarded-For has been sanitized by the load
155 balancer and thus cannot be rewritten by malicious users.
156 :param request: A Django request object.
157 :return: An IP address.
158 """
159 x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
160 if x_forwarded_for:
161 ip = x_forwarded_for.split(",")[0]
162 else:
163 ip = request.META.get("REMOTE_ADDR")
164 return ip
165
166 @staticmethod
167 def _thumbnail_proxy_comm(
168 path: str,
169 params: dict,
170 headers: tuple[tuple[str, str]] = (),
171 ):
172 proxy_url = settings.THUMBNAIL_PROXY_URL
173 query_string = urlencode(params)
174 upstream_url = f"{proxy_url}/{path}?{query_string}"
175 log.debug(f"Image proxy upstream URL: {upstream_url}")
176
177 try:
178 req = Request(upstream_url)
179 for key, val in headers:
180 req.add_header(key, val)
181 upstream_response = urlopen(req, timeout=10)
182
183 res_status = upstream_response.status
184 content_type = upstream_response.headers.get("Content-Type")
185 log.debug(
186 "Image proxy response "
187 f"status: {res_status}, content-type: {content_type}"
188 )
189
190 return upstream_response, res_status, content_type
191 except (HTTPError, RemoteDisconnected, TimeoutError) as exc:
192 raise get_api_exception(f"Failed to render thumbnail: {exc}")
193 except Exception as exc:
194 raise get_api_exception(
195 f"Failed to render thumbnail due to unidentified exception: {exc}"
196 )
197
198 @staticmethod
199 def _get_proxied_image(
200 image_url: str,
201 accept_header: str = "image/*",
202 is_full_size: bool = False,
203 is_compressed: bool = True,
204 ):
205 width = settings.THUMBNAIL_WIDTH_PX
206 if is_full_size:
207 info_res, *_ = MediaViewSet._thumbnail_proxy_comm(
208 "info", {"url": image_url}
209 )
210 info = json.loads(info_res.read())
211 width = info["width"]
212
213 params = {
214 "url": image_url,
215 "width": width,
216 }
217
218 if is_compressed:
219 params |= {
220 "quality": settings.THUMBNAIL_JPG_QUALITY,
221 "compression": settings.THUMBNAIL_PNG_COMPRESSION,
222 }
223 else:
224 params |= {
225 "quality": 100,
226 "compression": 0,
227 }
228
229 if "webp" in accept_header:
230 params["type"] = "auto" # Use ``Accept`` header to determine output type.
231
232 img_res, res_status, content_type = MediaViewSet._thumbnail_proxy_comm(
233 "resize", params, (("Accept", accept_header),)
234 )
235 response = HttpResponse(
236 img_res.read(), status=res_status, content_type=content_type
237 )
238 return response
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/catalog/api/views/media_views.py b/api/catalog/api/views/media_views.py
--- a/api/catalog/api/views/media_views.py
+++ b/api/catalog/api/views/media_views.py
@@ -9,9 +9,12 @@
from django.http.response import HttpResponse
from rest_framework import status
from rest_framework.decorators import action
+from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
+from sentry_sdk import capture_exception
+
from catalog.api.controllers import search_controller
from catalog.api.models import ContentProvider
from catalog.api.serializers.provider_serializers import ProviderSerializer
@@ -20,6 +23,11 @@
from catalog.custom_auto_schema import CustomAutoSchema
+class UpstreamThumbnailException(APIException):
+ status_code = status.HTTP_424_FAILED_DEPENDENCY
+ default_detail = "Could not render thumbnail due to upstream provider error."
+
+
class MediaViewSet(ReadOnlyModelViewSet):
swagger_schema = CustomAutoSchema
@@ -189,9 +197,11 @@
return upstream_response, res_status, content_type
except (HTTPError, RemoteDisconnected, TimeoutError) as exc:
- raise get_api_exception(f"Failed to render thumbnail: {exc}")
+ capture_exception(exc)
+ raise UpstreamThumbnailException(f"Failed to render thumbnail: {exc}")
except Exception as exc:
- raise get_api_exception(
+ capture_exception(exc)
+ raise UpstreamThumbnailException(
f"Failed to render thumbnail due to unidentified exception: {exc}"
)
|
{"golden_diff": "diff --git a/api/catalog/api/views/media_views.py b/api/catalog/api/views/media_views.py\n--- a/api/catalog/api/views/media_views.py\n+++ b/api/catalog/api/views/media_views.py\n@@ -9,9 +9,12 @@\n from django.http.response import HttpResponse\n from rest_framework import status\n from rest_framework.decorators import action\n+from rest_framework.exceptions import APIException\n from rest_framework.response import Response\n from rest_framework.viewsets import ReadOnlyModelViewSet\n \n+from sentry_sdk import capture_exception\n+\n from catalog.api.controllers import search_controller\n from catalog.api.models import ContentProvider\n from catalog.api.serializers.provider_serializers import ProviderSerializer\n@@ -20,6 +23,11 @@\n from catalog.custom_auto_schema import CustomAutoSchema\n \n \n+class UpstreamThumbnailException(APIException):\n+ status_code = status.HTTP_424_FAILED_DEPENDENCY\n+ default_detail = \"Could not render thumbnail due to upstream provider error.\"\n+\n+\n class MediaViewSet(ReadOnlyModelViewSet):\n swagger_schema = CustomAutoSchema\n \n@@ -189,9 +197,11 @@\n \n return upstream_response, res_status, content_type\n except (HTTPError, RemoteDisconnected, TimeoutError) as exc:\n- raise get_api_exception(f\"Failed to render thumbnail: {exc}\")\n+ capture_exception(exc)\n+ raise UpstreamThumbnailException(f\"Failed to render thumbnail: {exc}\")\n except Exception as exc:\n- raise get_api_exception(\n+ capture_exception(exc)\n+ raise UpstreamThumbnailException(\n f\"Failed to render thumbnail due to unidentified exception: {exc}\"\n )\n", "issue": "Thumbnail endpoint returns 500 if the upstream image URL 404s\n## Description\r\n<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->\r\n<!-- For example: \"I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page.\" -->\r\nI was looking through production logs to see where some 500 errors were coming from and I found a handful of logs that look like this:\r\n\r\nField | Value\r\n-- | --\r\n`@ingestionTime` | 1659625225674\r\n`@log` | 140733703834:/prod/api/nginx\r\n`@logStream` | i-05a7072edd44e97d9\r\n`@timestamp` | 1659625221900\r\nbody_bytes_sent | 66\r\nhost_header | api.openverse.engineering\r\nhttp_referrer | https://search.openverse.engineering/\r\nhttp_user_agent | Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\r\nhttp_x_forwarded_for | [redacted]\r\nremote_addr | [redacted]\r\nrequest | GET /v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/ HTTP/1.1\r\nrequest_time | 1.175\r\nstatus | 500\r\ntime_local | 04/Aug/2022:15:00:21 +0000\r\nupstream_response_time | 1.175\r\n\r\nIndeed, if you visit `https://api.openverse.engineering/v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/` you will get a 500 back with a message saying that the upstream request resulted in a 404. I think these should just 404 all the way, instead of resulting in a 500. \r\n\r\nI wonder if these should also be marked as dead links? It should already be anyway because the upstream url 404s and this is how we detect dead links during the search request, but clearly some still get through :thinking: \r\n\r\n## Reproduction\r\n<!-- Provide detailed steps to reproduce the bug. -->\r\n1. <!-- Step 1 ... --> Visit https://api.openverse.engineering/v1/images/81954166-168e-41a6-bff5-eb966cb940ec/thumb/\r\n2. 
<!-- Step 2 ... --> See the 500 response code with the 404 body details.\r\n\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "import json\nimport logging as log\nfrom http.client import RemoteDisconnected\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nfrom django.conf import settings\nfrom django.http.response import HttpResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\n\nfrom catalog.api.controllers import search_controller\nfrom catalog.api.models import ContentProvider\nfrom catalog.api.serializers.provider_serializers import ProviderSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.pagination import StandardPagination\nfrom catalog.custom_auto_schema import CustomAutoSchema\n\n\nclass MediaViewSet(ReadOnlyModelViewSet):\n swagger_schema = CustomAutoSchema\n\n lookup_field = \"identifier\"\n # TODO: https://github.com/encode/django-rest-framework/pull/6789\n lookup_value_regex = (\n r\"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}\"\n )\n\n pagination_class = StandardPagination\n\n # Populate these in the corresponding subclass\n model_class = None\n query_serializer_class = None\n default_index = None\n qa_index = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n required_fields = [\n self.model_class,\n self.query_serializer_class,\n self.default_index,\n self.qa_index,\n ]\n if any(val is None for val in required_fields):\n msg = \"Viewset fields are not completely populated.\"\n raise ValueError(msg)\n\n def get_queryset(self):\n return self.model_class.objects.all()\n\n # Standard actions\n\n def list(self, request, *_, **__):\n self.paginator.page_size = request.query_params.get(\"page_size\")\n page_size = self.paginator.page_size\n self.paginator.page = request.query_params.get(\"page\")\n page = self.paginator.page\n\n params = self.query_serializer_class(\n data=request.query_params, context={\"request\": request}\n )\n params.is_valid(raise_exception=True)\n\n hashed_ip = hash(self._get_user_ip(request))\n qa = params.validated_data[\"qa\"]\n filter_dead = params.validated_data[\"filter_dead\"]\n\n search_index = self.qa_index if qa else self.default_index\n try:\n results, num_pages, num_results = search_controller.search(\n params,\n search_index,\n page_size,\n hashed_ip,\n request,\n filter_dead,\n page,\n )\n self.paginator.page_count = num_pages\n self.paginator.result_count = num_results\n except ValueError as e:\n raise get_api_exception(getattr(e, \"message\", str(e)))\n\n serializer = self.get_serializer(results, many=True)\n return self.get_paginated_response(serializer.data)\n\n # Extra actions\n\n @action(detail=False, serializer_class=ProviderSerializer, pagination_class=None)\n def stats(self, *_, **__):\n source_counts = search_controller.get_sources(self.default_index)\n context = self.get_serializer_context() | {\n \"source_counts\": source_counts,\n }\n\n providers = ContentProvider.objects.filter(\n media_type=self.default_index, filter_content=False\n )\n serializer = self.get_serializer(providers, many=True, context=context)\n return Response(serializer.data)\n\n @action(detail=True)\n def related(self, request, 
identifier=None, *_, **__):\n try:\n results, num_results = search_controller.related_media(\n uuid=identifier,\n index=self.default_index,\n request=request,\n filter_dead=True,\n )\n self.paginator.result_count = num_results\n self.paginator.page_count = 1\n # `page_size` refers to the maximum number of related images to return.\n self.paginator.page_size = 10\n except ValueError as e:\n raise get_api_exception(getattr(e, \"message\", str(e)))\n # If there are no hits in the search controller\n except IndexError:\n raise get_api_exception(\"Could not find items.\", 404)\n\n serializer = self.get_serializer(results, many=True)\n return self.get_paginated_response(serializer.data)\n\n def report(self, request, *_, **__):\n media = self.get_object()\n identifier = media.identifier\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n raise get_api_exception(\"Invalid input.\", 400)\n report = serializer.save(identifier=identifier)\n\n serializer = self.get_serializer(report)\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n\n def thumbnail(self, image_url, request, *_, **__):\n serializer = self.get_serializer(data=request.query_params)\n serializer.is_valid(raise_exception=True)\n return self._get_proxied_image(\n image_url,\n accept_header=request.headers.get(\"Accept\", \"image/*\"),\n **serializer.validated_data,\n )\n\n # Helper functions\n\n @staticmethod\n def _get_user_ip(request):\n \"\"\"\n Read request headers to find the correct IP address.\n It is assumed that X-Forwarded-For has been sanitized by the load\n balancer and thus cannot be rewritten by malicious users.\n :param request: A Django request object.\n :return: An IP address.\n \"\"\"\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n return ip\n\n @staticmethod\n def _thumbnail_proxy_comm(\n path: str,\n params: dict,\n headers: tuple[tuple[str, str]] = (),\n ):\n proxy_url = settings.THUMBNAIL_PROXY_URL\n query_string = urlencode(params)\n upstream_url = f\"{proxy_url}/{path}?{query_string}\"\n log.debug(f\"Image proxy upstream URL: {upstream_url}\")\n\n try:\n req = Request(upstream_url)\n for key, val in headers:\n req.add_header(key, val)\n upstream_response = urlopen(req, timeout=10)\n\n res_status = upstream_response.status\n content_type = upstream_response.headers.get(\"Content-Type\")\n log.debug(\n \"Image proxy response \"\n f\"status: {res_status}, content-type: {content_type}\"\n )\n\n return upstream_response, res_status, content_type\n except (HTTPError, RemoteDisconnected, TimeoutError) as exc:\n raise get_api_exception(f\"Failed to render thumbnail: {exc}\")\n except Exception as exc:\n raise get_api_exception(\n f\"Failed to render thumbnail due to unidentified exception: {exc}\"\n )\n\n @staticmethod\n def _get_proxied_image(\n image_url: str,\n accept_header: str = \"image/*\",\n is_full_size: bool = False,\n is_compressed: bool = True,\n ):\n width = settings.THUMBNAIL_WIDTH_PX\n if is_full_size:\n info_res, *_ = MediaViewSet._thumbnail_proxy_comm(\n \"info\", {\"url\": image_url}\n )\n info = json.loads(info_res.read())\n width = info[\"width\"]\n\n params = {\n \"url\": image_url,\n \"width\": width,\n }\n\n if is_compressed:\n params |= {\n \"quality\": settings.THUMBNAIL_JPG_QUALITY,\n \"compression\": settings.THUMBNAIL_PNG_COMPRESSION,\n }\n else:\n params |= {\n \"quality\": 100,\n \"compression\": 0,\n }\n\n if 
\"webp\" in accept_header:\n params[\"type\"] = \"auto\" # Use ``Accept`` header to determine output type.\n\n img_res, res_status, content_type = MediaViewSet._thumbnail_proxy_comm(\n \"resize\", params, ((\"Accept\", accept_header),)\n )\n response = HttpResponse(\n img_res.read(), status=res_status, content_type=content_type\n )\n return response\n", "path": "api/catalog/api/views/media_views.py"}], "after_files": [{"content": "import json\nimport logging as log\nfrom http.client import RemoteDisconnected\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nfrom django.conf import settings\nfrom django.http.response import HttpResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\n\nfrom sentry_sdk import capture_exception\n\nfrom catalog.api.controllers import search_controller\nfrom catalog.api.models import ContentProvider\nfrom catalog.api.serializers.provider_serializers import ProviderSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.pagination import StandardPagination\nfrom catalog.custom_auto_schema import CustomAutoSchema\n\n\nclass UpstreamThumbnailException(APIException):\n status_code = status.HTTP_424_FAILED_DEPENDENCY\n default_detail = \"Could not render thumbnail due to upstream provider error.\"\n\n\nclass MediaViewSet(ReadOnlyModelViewSet):\n swagger_schema = CustomAutoSchema\n\n lookup_field = \"identifier\"\n # TODO: https://github.com/encode/django-rest-framework/pull/6789\n lookup_value_regex = (\n r\"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}\"\n )\n\n pagination_class = StandardPagination\n\n # Populate these in the corresponding subclass\n model_class = None\n query_serializer_class = None\n default_index = None\n qa_index = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n required_fields = [\n self.model_class,\n self.query_serializer_class,\n self.default_index,\n self.qa_index,\n ]\n if any(val is None for val in required_fields):\n msg = \"Viewset fields are not completely populated.\"\n raise ValueError(msg)\n\n def get_queryset(self):\n return self.model_class.objects.all()\n\n # Standard actions\n\n def list(self, request, *_, **__):\n self.paginator.page_size = request.query_params.get(\"page_size\")\n page_size = self.paginator.page_size\n self.paginator.page = request.query_params.get(\"page\")\n page = self.paginator.page\n\n params = self.query_serializer_class(\n data=request.query_params, context={\"request\": request}\n )\n params.is_valid(raise_exception=True)\n\n hashed_ip = hash(self._get_user_ip(request))\n qa = params.validated_data[\"qa\"]\n filter_dead = params.validated_data[\"filter_dead\"]\n\n search_index = self.qa_index if qa else self.default_index\n try:\n results, num_pages, num_results = search_controller.search(\n params,\n search_index,\n page_size,\n hashed_ip,\n request,\n filter_dead,\n page,\n )\n self.paginator.page_count = num_pages\n self.paginator.result_count = num_results\n except ValueError as e:\n raise get_api_exception(getattr(e, \"message\", str(e)))\n\n serializer = self.get_serializer(results, many=True)\n return self.get_paginated_response(serializer.data)\n\n # Extra actions\n\n @action(detail=False, serializer_class=ProviderSerializer, 
pagination_class=None)\n def stats(self, *_, **__):\n source_counts = search_controller.get_sources(self.default_index)\n context = self.get_serializer_context() | {\n \"source_counts\": source_counts,\n }\n\n providers = ContentProvider.objects.filter(\n media_type=self.default_index, filter_content=False\n )\n serializer = self.get_serializer(providers, many=True, context=context)\n return Response(serializer.data)\n\n @action(detail=True)\n def related(self, request, identifier=None, *_, **__):\n try:\n results, num_results = search_controller.related_media(\n uuid=identifier,\n index=self.default_index,\n request=request,\n filter_dead=True,\n )\n self.paginator.result_count = num_results\n self.paginator.page_count = 1\n # `page_size` refers to the maximum number of related images to return.\n self.paginator.page_size = 10\n except ValueError as e:\n raise get_api_exception(getattr(e, \"message\", str(e)))\n # If there are no hits in the search controller\n except IndexError:\n raise get_api_exception(\"Could not find items.\", 404)\n\n serializer = self.get_serializer(results, many=True)\n return self.get_paginated_response(serializer.data)\n\n def report(self, request, *_, **__):\n media = self.get_object()\n identifier = media.identifier\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n raise get_api_exception(\"Invalid input.\", 400)\n report = serializer.save(identifier=identifier)\n\n serializer = self.get_serializer(report)\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n\n def thumbnail(self, image_url, request, *_, **__):\n serializer = self.get_serializer(data=request.query_params)\n serializer.is_valid(raise_exception=True)\n return self._get_proxied_image(\n image_url,\n accept_header=request.headers.get(\"Accept\", \"image/*\"),\n **serializer.validated_data,\n )\n\n # Helper functions\n\n @staticmethod\n def _get_user_ip(request):\n \"\"\"\n Read request headers to find the correct IP address.\n It is assumed that X-Forwarded-For has been sanitized by the load\n balancer and thus cannot be rewritten by malicious users.\n :param request: A Django request object.\n :return: An IP address.\n \"\"\"\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n return ip\n\n @staticmethod\n def _thumbnail_proxy_comm(\n path: str,\n params: dict,\n headers: tuple[tuple[str, str]] = (),\n ):\n proxy_url = settings.THUMBNAIL_PROXY_URL\n query_string = urlencode(params)\n upstream_url = f\"{proxy_url}/{path}?{query_string}\"\n log.debug(f\"Image proxy upstream URL: {upstream_url}\")\n\n try:\n req = Request(upstream_url)\n for key, val in headers:\n req.add_header(key, val)\n upstream_response = urlopen(req, timeout=10)\n\n res_status = upstream_response.status\n content_type = upstream_response.headers.get(\"Content-Type\")\n log.debug(\n \"Image proxy response \"\n f\"status: {res_status}, content-type: {content_type}\"\n )\n\n return upstream_response, res_status, content_type\n except (HTTPError, RemoteDisconnected, TimeoutError) as exc:\n capture_exception(exc)\n raise UpstreamThumbnailException(f\"Failed to render thumbnail: {exc}\")\n except Exception as exc:\n capture_exception(exc)\n raise UpstreamThumbnailException(\n f\"Failed to render thumbnail due to unidentified exception: {exc}\"\n )\n\n @staticmethod\n def _get_proxied_image(\n image_url: str,\n accept_header: str = \"image/*\",\n 
is_full_size: bool = False,\n is_compressed: bool = True,\n ):\n width = settings.THUMBNAIL_WIDTH_PX\n if is_full_size:\n info_res, *_ = MediaViewSet._thumbnail_proxy_comm(\n \"info\", {\"url\": image_url}\n )\n info = json.loads(info_res.read())\n width = info[\"width\"]\n\n params = {\n \"url\": image_url,\n \"width\": width,\n }\n\n if is_compressed:\n params |= {\n \"quality\": settings.THUMBNAIL_JPG_QUALITY,\n \"compression\": settings.THUMBNAIL_PNG_COMPRESSION,\n }\n else:\n params |= {\n \"quality\": 100,\n \"compression\": 0,\n }\n\n if \"webp\" in accept_header:\n params[\"type\"] = \"auto\" # Use ``Accept`` header to determine output type.\n\n img_res, res_status, content_type = MediaViewSet._thumbnail_proxy_comm(\n \"resize\", params, ((\"Accept\", accept_header),)\n )\n response = HttpResponse(\n img_res.read(), status=res_status, content_type=content_type\n )\n return response\n", "path": "api/catalog/api/views/media_views.py"}]}
| 3,337 | 342 |
gh_patches_debug_47928
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-862
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only prepend www for production website
It should not be used for development website.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/production.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for production environment.
4
5 - Load secret values from environment variables.
6 - Set static URL to Google Cloud Storage Bucket.
7 """
8
9 from .base import * # noqa: F403
10
11
12 # SECRET CONFIGURATION
13 # ------------------------------------------------------------------------------
14 # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
15 # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
16 SECRET_KEY = env("DJANGO_SECRET_KEY") # noqa: F405
17
18 # SECURITY WARNING: App Engine"s security features ensure that it is safe to
19 # have ALLOWED_HOSTS = ["*"] when the app is deployed. If you deploy a Django
20 # app not on App Engine, make sure to set an appropriate host here.
21 # See https://docs.djangoproject.com/en/1.10/ref/settings/
22 ALLOWED_HOSTS = ["*"]
23
24 # URL Configuration
25 # ------------------------------------------------------------------------------
26 PREPEND_WWW = True
27
28 # DATABASE CONFIGURATION
29 # ----------------------------------------------------------------------------
30 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
31 DATABASES = {
32 "default": {
33 "ENGINE": "django.db.backends.postgresql",
34 "NAME": "csunplugged",
35 "USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
36 "PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
37 "HOST": "/cloudsql/" + env("GOOGLE_CLOUD_SQL_CONNECTION_NAME"), # noqa: F405
38 }
39 }
40 DATABASES["default"]["ATOMIC_REQUESTS"] = True
41
42 # Static files
43 STATIC_URL = "https://storage.googleapis.com/" + env("GOOGLE_CLOUD_STORAGE_BUCKET_NAME") + "/static/" # noqa: F405
44
45 # SECURITY CONFIGURATION
46 # ------------------------------------------------------------------------------
47 # See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
48 # and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
49
50 # set this to 60 seconds and then to 518400 when you can prove it works
51 SECURE_HSTS_SECONDS = 60
52 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
53 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True) # noqa: F405
54 SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True) # noqa: F405
55 SECURE_BROWSER_XSS_FILTER = True
56 SESSION_COOKIE_SECURE = True
57 SESSION_COOKIE_HTTPONLY = True
58 SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True) # noqa: F405
59 CSRF_COOKIE_SECURE = True
60 CSRF_COOKIE_HTTPONLY = True
61 X_FRAME_OPTIONS = "DENY"
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py
--- a/csunplugged/config/settings/production.py
+++ b/csunplugged/config/settings/production.py
@@ -23,7 +23,10 @@
# URL Configuration
# ------------------------------------------------------------------------------
-PREPEND_WWW = True
+if env("DEPLOYMENT", default=None) == "prod": # noqa: F405
+ PREPEND_WWW = True
+else:
+ PREPEND_WWW = False
# DATABASE CONFIGURATION
# ----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py\n--- a/csunplugged/config/settings/production.py\n+++ b/csunplugged/config/settings/production.py\n@@ -23,7 +23,10 @@\n \n # URL Configuration\n # ------------------------------------------------------------------------------\n-PREPEND_WWW = True\n+if env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n+ PREPEND_WWW = True\n+else:\n+ PREPEND_WWW = False\n \n # DATABASE CONFIGURATION\n # ----------------------------------------------------------------------------\n", "issue": "Only prepend www for production website\nIt should not be used for development website.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nPREPEND_WWW = True\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "csunplugged/config/settings/production.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production 
environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nif env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n PREPEND_WWW = True\nelse:\n PREPEND_WWW = False\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "csunplugged/config/settings/production.py"}]}
| 1,026 | 129 |
gh_patches_debug_6831
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-6104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in wagtail/core/middleware.py warning
The second line of the warning in `wagtail/core/middleware.py` should read:
` 'Please update your code to use Site.find_for_request(request) in place of request.site, '`
instead of:
` 'Please update your code to use Site.get_for_request(request) in place of request.site, '`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/core/middleware.py`
Content:
```
1 import warnings
2
3 from django.utils.deprecation import MiddlewareMixin
4 from wagtail.core.models import Site
5 from wagtail.utils.deprecation import RemovedInWagtail211Warning
6
7
8 warnings.warn(
9 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
10 'Please update your code to use Site.get_for_request(request) in place of request.site, '
11 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',
12 RemovedInWagtail211Warning
13 )
14
15
16 class SiteMiddleware(MiddlewareMixin):
17 def process_request(self, request):
18 """
19 Set request.site to contain the Site object responsible for handling this request,
20 according to hostname matching rules
21 """
22 try:
23 request.site = Site.find_for_request(request)
24 except Site.DoesNotExist:
25 request.site = None
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/core/middleware.py b/wagtail/core/middleware.py
--- a/wagtail/core/middleware.py
+++ b/wagtail/core/middleware.py
@@ -7,7 +7,7 @@
warnings.warn(
'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
- 'Please update your code to use Site.get_for_request(request) in place of request.site, '
+ 'Please update your code to use Site.find_for_request(request) in place of request.site, '
'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',
RemovedInWagtail211Warning
)
|
{"golden_diff": "diff --git a/wagtail/core/middleware.py b/wagtail/core/middleware.py\n--- a/wagtail/core/middleware.py\n+++ b/wagtail/core/middleware.py\n@@ -7,7 +7,7 @@\n \n warnings.warn(\n 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '\n- 'Please update your code to use Site.get_for_request(request) in place of request.site, '\n+ 'Please update your code to use Site.find_for_request(request) in place of request.site, '\n 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',\n RemovedInWagtail211Warning\n )\n", "issue": "Typo in wagtail/core/middleware.py warning\nThe second line of the warning in `wagtail/core/middleware.py` should read:\r\n\r\n` 'Please update your code to use Site.find_for_request(request) in place of request.site, '`\r\n\r\ninstead of:\r\n\r\n` 'Please update your code to use Site.get_for_request(request) in place of request.site, '`\r\n\n", "before_files": [{"content": "import warnings\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom wagtail.core.models import Site\nfrom wagtail.utils.deprecation import RemovedInWagtail211Warning\n\n\nwarnings.warn(\n 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '\n 'Please update your code to use Site.get_for_request(request) in place of request.site, '\n 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',\n RemovedInWagtail211Warning\n)\n\n\nclass SiteMiddleware(MiddlewareMixin):\n def process_request(self, request):\n \"\"\"\n Set request.site to contain the Site object responsible for handling this request,\n according to hostname matching rules\n \"\"\"\n try:\n request.site = Site.find_for_request(request)\n except Site.DoesNotExist:\n request.site = None\n", "path": "wagtail/core/middleware.py"}], "after_files": [{"content": "import warnings\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom wagtail.core.models import Site\nfrom wagtail.utils.deprecation import RemovedInWagtail211Warning\n\n\nwarnings.warn(\n 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '\n 'Please update your code to use Site.find_for_request(request) in place of request.site, '\n 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',\n RemovedInWagtail211Warning\n)\n\n\nclass SiteMiddleware(MiddlewareMixin):\n def process_request(self, request):\n \"\"\"\n Set request.site to contain the Site object responsible for handling this request,\n according to hostname matching rules\n \"\"\"\n try:\n request.site = Site.find_for_request(request)\n except Site.DoesNotExist:\n request.site = None\n", "path": "wagtail/core/middleware.py"}]}
| 562 | 145 |
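The fix in this record only corrects the wording of the deprecation message, but the migration it points to is worth spelling out. Below is a minimal sketch of moving off `request.site`, assuming Wagtail 2.x where `Site.find_for_request` is available; the view names are purely illustrative and not part of the record.

```python
from wagtail.core.models import Site


# Before (deprecated): depends on SiteMiddleware having set request.site.
def homepage_before(request):
    return request.site.root_page


# After: resolve the site from the request directly, no middleware needed.
def homepage_after(request):
    # Site.find_for_request raises Site.DoesNotExist when no hostname matches,
    # which is exactly what the old SiteMiddleware in this record caught before
    # falling back to request.site = None.
    site = Site.find_for_request(request)
    return site.root_page
```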
gh_patches_debug_1223
|
rasdani/github-patches
|
git_diff
|
magenta__magenta-629
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))
Hey guys,
I've just set up my conda environment and packages. When I ran the bazel test //magenta/... command, the test //magenta/models/shared:events_rnn_graph_test failed. I am new to this project, so hopefully someone could point me in the right direction! For your info, I have installed all the required packages according to setup.py and confirmed the installation with the 'pip freeze' and 'conda list' commands.
Thanks in advance!
Simon
Below is the error message from the log file:
`ERROR: testBuildGraphWithAttention (__main__.EventSequenceRNNGraphTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph_test.py", line 58, in testBuildGraphWithAttention
'train', self.config, sequence_example_file_paths=['test'])
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py", line 98, in build_graph
attn_length=hparams.attn_length)
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py", line 47, in make_rnn_cell
cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)
File "/Users/simonttk/anaconda2/envs/magenta/lib/python2.7/site-packages/tensorflow/contrib/rnn/python/ops/rnn_cell.py", line 1077, in __init__
% str(cell.state_size))
ValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/models/shared/events_rnn_graph.py`
Content:
```
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Provides function to build an event sequence RNN model's graph."""
15
16 # internal imports
17 import tensorflow as tf
18 import magenta
19
20
21 def make_rnn_cell(rnn_layer_sizes,
22 dropout_keep_prob=1.0,
23 attn_length=0,
24 base_cell=tf.contrib.rnn.BasicLSTMCell):
25 """Makes a RNN cell from the given hyperparameters.
26
27 Args:
28 rnn_layer_sizes: A list of integer sizes (in units) for each layer of the
29 RNN.
30 dropout_keep_prob: The float probability to keep the output of any given
31 sub-cell.
32 attn_length: The size of the attention vector.
33 base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.
34
35 Returns:
36 A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
37 """
38 cells = []
39 for num_units in rnn_layer_sizes:
40 cell = base_cell(num_units)
41 cell = tf.contrib.rnn.DropoutWrapper(
42 cell, output_keep_prob=dropout_keep_prob)
43 cells.append(cell)
44
45 cell = tf.contrib.rnn.MultiRNNCell(cells)
46 if attn_length:
47 cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)
48
49 return cell
50
51
52 def build_graph(mode, config, sequence_example_file_paths=None):
53 """Builds the TensorFlow graph.
54
55 Args:
56 mode: 'train', 'eval', or 'generate'. Only mode related ops are added to
57 the graph.
58 config: An EventSequenceRnnConfig containing the encoder/decoder and HParams
59 to use.
60 sequence_example_file_paths: A list of paths to TFRecord files containing
61 tf.train.SequenceExample protos. Only needed for training and
62 evaluation. May be a sharded file of the form.
63
64 Returns:
65 A tf.Graph instance which contains the TF ops.
66
67 Raises:
68 ValueError: If mode is not 'train', 'eval', or 'generate'.
69 """
70 if mode not in ('train', 'eval', 'generate'):
71 raise ValueError("The mode parameter must be 'train', 'eval', "
72 "or 'generate'. The mode parameter was: %s" % mode)
73
74 hparams = config.hparams
75 encoder_decoder = config.encoder_decoder
76
77 tf.logging.info('hparams = %s', hparams.values())
78
79 input_size = encoder_decoder.input_size
80 num_classes = encoder_decoder.num_classes
81 no_event_label = encoder_decoder.default_event_label
82
83 with tf.Graph().as_default() as graph:
84 inputs, labels, lengths, = None, None, None
85
86 if mode == 'train' or mode == 'eval':
87 inputs, labels, lengths = magenta.common.get_padded_batch(
88 sequence_example_file_paths, hparams.batch_size, input_size)
89
90 elif mode == 'generate':
91 inputs = tf.placeholder(tf.float32, [hparams.batch_size, None,
92 input_size])
93
94 cell = make_rnn_cell(
95 hparams.rnn_layer_sizes,
96 dropout_keep_prob=(
97 1.0 if mode == 'generate' else hparams.dropout_keep_prob),
98 attn_length=hparams.attn_length)
99
100 initial_state = cell.zero_state(hparams.batch_size, tf.float32)
101
102 outputs, final_state = tf.nn.dynamic_rnn(
103 cell, inputs, initial_state=initial_state, swap_memory=True)
104
105 outputs_flat = tf.reshape(outputs, [-1, cell.output_size])
106 logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)
107
108 if mode == 'train' or mode == 'eval':
109 labels_flat = tf.reshape(labels, [-1])
110 mask = tf.sequence_mask(lengths)
111 if hparams.skip_first_n_losses:
112 skip = tf.minimum(lengths, hparams.skip_first_n_losses)
113 skip_mask = tf.sequence_mask(skip, maxlen=tf.reduce_max(lengths))
114 mask = tf.logical_and(mask, tf.logical_not(skip_mask))
115 mask = tf.cast(mask, tf.float32)
116 mask_flat = tf.reshape(mask, [-1])
117
118 num_logits = tf.to_float(tf.reduce_sum(lengths))
119
120 with tf.control_dependencies(
121 [tf.Assert(tf.greater(num_logits, 0.), [num_logits])]):
122 softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
123 labels=labels_flat, logits=logits_flat)
124 loss = tf.reduce_sum(mask_flat * softmax_cross_entropy) / num_logits
125 perplexity = (tf.reduce_sum(mask_flat * tf.exp(softmax_cross_entropy)) /
126 num_logits)
127
128 correct_predictions = tf.to_float(
129 tf.nn.in_top_k(logits_flat, labels_flat, 1)) * mask_flat
130 accuracy = tf.reduce_sum(correct_predictions) / num_logits * 100
131
132 event_positions = (
133 tf.to_float(tf.not_equal(labels_flat, no_event_label)) * mask_flat)
134 event_accuracy = (
135 tf.reduce_sum(tf.multiply(correct_predictions, event_positions)) /
136 tf.reduce_sum(event_positions) * 100)
137
138 no_event_positions = (
139 tf.to_float(tf.equal(labels_flat, no_event_label)) * mask_flat)
140 no_event_accuracy = (
141 tf.reduce_sum(tf.multiply(correct_predictions, no_event_positions)) /
142 tf.reduce_sum(no_event_positions) * 100)
143
144 global_step = tf.Variable(0, trainable=False, name='global_step')
145
146 tf.add_to_collection('loss', loss)
147 tf.add_to_collection('perplexity', perplexity)
148 tf.add_to_collection('accuracy', accuracy)
149 tf.add_to_collection('global_step', global_step)
150
151 summaries = [
152 tf.summary.scalar('loss', loss),
153 tf.summary.scalar('perplexity', perplexity),
154 tf.summary.scalar('accuracy', accuracy),
155 tf.summary.scalar(
156 'event_accuracy', event_accuracy),
157 tf.summary.scalar(
158 'no_event_accuracy', no_event_accuracy),
159 ]
160
161 if mode == 'train':
162 learning_rate = tf.train.exponential_decay(
163 hparams.initial_learning_rate, global_step, hparams.decay_steps,
164 hparams.decay_rate, staircase=True, name='learning_rate')
165
166 opt = tf.train.AdamOptimizer(learning_rate)
167 params = tf.trainable_variables()
168 gradients = tf.gradients(loss, params)
169 clipped_gradients, _ = tf.clip_by_global_norm(gradients,
170 hparams.clip_norm)
171 train_op = opt.apply_gradients(zip(clipped_gradients, params),
172 global_step)
173 tf.add_to_collection('learning_rate', learning_rate)
174 tf.add_to_collection('train_op', train_op)
175
176 summaries.append(tf.summary.scalar(
177 'learning_rate', learning_rate))
178
179 if mode == 'eval':
180 summary_op = tf.summary.merge(summaries)
181 tf.add_to_collection('summary_op', summary_op)
182
183 elif mode == 'generate':
184 temperature = tf.placeholder(tf.float32, [])
185 softmax_flat = tf.nn.softmax(
186 tf.div(logits_flat, tf.fill([num_classes], temperature)))
187 softmax = tf.reshape(softmax_flat, [hparams.batch_size, -1, num_classes])
188
189 tf.add_to_collection('inputs', inputs)
190 tf.add_to_collection('initial_state', initial_state)
191 tf.add_to_collection('final_state', final_state)
192 tf.add_to_collection('temperature', temperature)
193 tf.add_to_collection('softmax', softmax)
194
195 return graph
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/magenta/models/shared/events_rnn_graph.py b/magenta/models/shared/events_rnn_graph.py
--- a/magenta/models/shared/events_rnn_graph.py
+++ b/magenta/models/shared/events_rnn_graph.py
@@ -44,7 +44,8 @@
cell = tf.contrib.rnn.MultiRNNCell(cells)
if attn_length:
- cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)
+ cell = tf.contrib.rnn.AttentionCellWrapper(
+ cell, attn_length, state_is_tuple=True)
return cell
|
{"golden_diff": "diff --git a/magenta/models/shared/events_rnn_graph.py b/magenta/models/shared/events_rnn_graph.py\n--- a/magenta/models/shared/events_rnn_graph.py\n+++ b/magenta/models/shared/events_rnn_graph.py\n@@ -44,7 +44,8 @@\n \n cell = tf.contrib.rnn.MultiRNNCell(cells)\n if attn_length:\n- cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)\n+ cell = tf.contrib.rnn.AttentionCellWrapper(\n+ cell, attn_length, state_is_tuple=True)\n \n return cell\n", "issue": "ValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))\nHey guys,\r\n\r\nI've just set up my conda environment and packages. When I running the bazel test //magenta/... command, the test //magenta/models/shared:events_rnn_graph_test failed. I am new to this project so hopefully someone could point me to the right direction! For your info, I have installed all the required packages according to setup.py, and confirmed installation with 'pip freeze' and 'conda list' command. \r\n\r\nThanks in advance!\r\nSimon \r\n\r\nBellow is the error message in the log file:\r\n\r\n`ERROR: testBuildGraphWithAttention (__main__.EventSequenceRNNGraphTest)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph_test.py\", line 58, in testBuildGraphWithAttention\r\n 'train', self.config, sequence_example_file_paths=['test'])\r\n File \"/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py\", line 98, in build_graph\r\n attn_length=hparams.attn_length)\r\n File \"/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py\", line 47, in make_rnn_cell\r\n cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)\r\n File \"/Users/simonttk/anaconda2/envs/magenta/lib/python2.7/site-packages/tensorflow/contrib/rnn/python/ops/rnn_cell.py\", line 1077, in __init__\r\n % str(cell.state_size))\r\nValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))`\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides function to build an event sequence RNN model's graph.\"\"\"\n\n# internal imports\nimport tensorflow as tf\nimport magenta\n\n\ndef make_rnn_cell(rnn_layer_sizes,\n dropout_keep_prob=1.0,\n attn_length=0,\n base_cell=tf.contrib.rnn.BasicLSTMCell):\n \"\"\"Makes a RNN cell from the given hyperparameters.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n dropout_keep_prob: The float probability to keep the output of any given\n sub-cell.\n attn_length: The size of the attention vector.\n base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n \"\"\"\n cells = []\n for num_units in rnn_layer_sizes:\n cell = base_cell(num_units)\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=dropout_keep_prob)\n cells.append(cell)\n\n cell = tf.contrib.rnn.MultiRNNCell(cells)\n if attn_length:\n cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)\n\n return cell\n\n\ndef build_graph(mode, config, sequence_example_file_paths=None):\n \"\"\"Builds the TensorFlow graph.\n\n Args:\n mode: 'train', 'eval', or 'generate'. Only mode related ops are added to\n the graph.\n config: An EventSequenceRnnConfig containing the encoder/decoder and HParams\n to use.\n sequence_example_file_paths: A list of paths to TFRecord files containing\n tf.train.SequenceExample protos. Only needed for training and\n evaluation. May be a sharded file of the form.\n\n Returns:\n A tf.Graph instance which contains the TF ops.\n\n Raises:\n ValueError: If mode is not 'train', 'eval', or 'generate'.\n \"\"\"\n if mode not in ('train', 'eval', 'generate'):\n raise ValueError(\"The mode parameter must be 'train', 'eval', \"\n \"or 'generate'. 
The mode parameter was: %s\" % mode)\n\n hparams = config.hparams\n encoder_decoder = config.encoder_decoder\n\n tf.logging.info('hparams = %s', hparams.values())\n\n input_size = encoder_decoder.input_size\n num_classes = encoder_decoder.num_classes\n no_event_label = encoder_decoder.default_event_label\n\n with tf.Graph().as_default() as graph:\n inputs, labels, lengths, = None, None, None\n\n if mode == 'train' or mode == 'eval':\n inputs, labels, lengths = magenta.common.get_padded_batch(\n sequence_example_file_paths, hparams.batch_size, input_size)\n\n elif mode == 'generate':\n inputs = tf.placeholder(tf.float32, [hparams.batch_size, None,\n input_size])\n\n cell = make_rnn_cell(\n hparams.rnn_layer_sizes,\n dropout_keep_prob=(\n 1.0 if mode == 'generate' else hparams.dropout_keep_prob),\n attn_length=hparams.attn_length)\n\n initial_state = cell.zero_state(hparams.batch_size, tf.float32)\n\n outputs, final_state = tf.nn.dynamic_rnn(\n cell, inputs, initial_state=initial_state, swap_memory=True)\n\n outputs_flat = tf.reshape(outputs, [-1, cell.output_size])\n logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)\n\n if mode == 'train' or mode == 'eval':\n labels_flat = tf.reshape(labels, [-1])\n mask = tf.sequence_mask(lengths)\n if hparams.skip_first_n_losses:\n skip = tf.minimum(lengths, hparams.skip_first_n_losses)\n skip_mask = tf.sequence_mask(skip, maxlen=tf.reduce_max(lengths))\n mask = tf.logical_and(mask, tf.logical_not(skip_mask))\n mask = tf.cast(mask, tf.float32)\n mask_flat = tf.reshape(mask, [-1])\n\n num_logits = tf.to_float(tf.reduce_sum(lengths))\n\n with tf.control_dependencies(\n [tf.Assert(tf.greater(num_logits, 0.), [num_logits])]):\n softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels_flat, logits=logits_flat)\n loss = tf.reduce_sum(mask_flat * softmax_cross_entropy) / num_logits\n perplexity = (tf.reduce_sum(mask_flat * tf.exp(softmax_cross_entropy)) /\n num_logits)\n\n correct_predictions = tf.to_float(\n tf.nn.in_top_k(logits_flat, labels_flat, 1)) * mask_flat\n accuracy = tf.reduce_sum(correct_predictions) / num_logits * 100\n\n event_positions = (\n tf.to_float(tf.not_equal(labels_flat, no_event_label)) * mask_flat)\n event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, event_positions)) /\n tf.reduce_sum(event_positions) * 100)\n\n no_event_positions = (\n tf.to_float(tf.equal(labels_flat, no_event_label)) * mask_flat)\n no_event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, no_event_positions)) /\n tf.reduce_sum(no_event_positions) * 100)\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('perplexity', perplexity)\n tf.add_to_collection('accuracy', accuracy)\n tf.add_to_collection('global_step', global_step)\n\n summaries = [\n tf.summary.scalar('loss', loss),\n tf.summary.scalar('perplexity', perplexity),\n tf.summary.scalar('accuracy', accuracy),\n tf.summary.scalar(\n 'event_accuracy', event_accuracy),\n tf.summary.scalar(\n 'no_event_accuracy', no_event_accuracy),\n ]\n\n if mode == 'train':\n learning_rate = tf.train.exponential_decay(\n hparams.initial_learning_rate, global_step, hparams.decay_steps,\n hparams.decay_rate, staircase=True, name='learning_rate')\n\n opt = tf.train.AdamOptimizer(learning_rate)\n params = tf.trainable_variables()\n gradients = tf.gradients(loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients,\n hparams.clip_norm)\n train_op = 
opt.apply_gradients(zip(clipped_gradients, params),\n global_step)\n tf.add_to_collection('learning_rate', learning_rate)\n tf.add_to_collection('train_op', train_op)\n\n summaries.append(tf.summary.scalar(\n 'learning_rate', learning_rate))\n\n if mode == 'eval':\n summary_op = tf.summary.merge(summaries)\n tf.add_to_collection('summary_op', summary_op)\n\n elif mode == 'generate':\n temperature = tf.placeholder(tf.float32, [])\n softmax_flat = tf.nn.softmax(\n tf.div(logits_flat, tf.fill([num_classes], temperature)))\n softmax = tf.reshape(softmax_flat, [hparams.batch_size, -1, num_classes])\n\n tf.add_to_collection('inputs', inputs)\n tf.add_to_collection('initial_state', initial_state)\n tf.add_to_collection('final_state', final_state)\n tf.add_to_collection('temperature', temperature)\n tf.add_to_collection('softmax', softmax)\n\n return graph\n", "path": "magenta/models/shared/events_rnn_graph.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides function to build an event sequence RNN model's graph.\"\"\"\n\n# internal imports\nimport tensorflow as tf\nimport magenta\n\n\ndef make_rnn_cell(rnn_layer_sizes,\n dropout_keep_prob=1.0,\n attn_length=0,\n base_cell=tf.contrib.rnn.BasicLSTMCell):\n \"\"\"Makes a RNN cell from the given hyperparameters.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n dropout_keep_prob: The float probability to keep the output of any given\n sub-cell.\n attn_length: The size of the attention vector.\n base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n \"\"\"\n cells = []\n for num_units in rnn_layer_sizes:\n cell = base_cell(num_units)\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=dropout_keep_prob)\n cells.append(cell)\n\n cell = tf.contrib.rnn.MultiRNNCell(cells)\n if attn_length:\n cell = tf.contrib.rnn.AttentionCellWrapper(\n cell, attn_length, state_is_tuple=True)\n\n return cell\n\n\ndef build_graph(mode, config, sequence_example_file_paths=None):\n \"\"\"Builds the TensorFlow graph.\n\n Args:\n mode: 'train', 'eval', or 'generate'. Only mode related ops are added to\n the graph.\n config: An EventSequenceRnnConfig containing the encoder/decoder and HParams\n to use.\n sequence_example_file_paths: A list of paths to TFRecord files containing\n tf.train.SequenceExample protos. Only needed for training and\n evaluation. May be a sharded file of the form.\n\n Returns:\n A tf.Graph instance which contains the TF ops.\n\n Raises:\n ValueError: If mode is not 'train', 'eval', or 'generate'.\n \"\"\"\n if mode not in ('train', 'eval', 'generate'):\n raise ValueError(\"The mode parameter must be 'train', 'eval', \"\n \"or 'generate'. 
The mode parameter was: %s\" % mode)\n\n hparams = config.hparams\n encoder_decoder = config.encoder_decoder\n\n tf.logging.info('hparams = %s', hparams.values())\n\n input_size = encoder_decoder.input_size\n num_classes = encoder_decoder.num_classes\n no_event_label = encoder_decoder.default_event_label\n\n with tf.Graph().as_default() as graph:\n inputs, labels, lengths, = None, None, None\n\n if mode == 'train' or mode == 'eval':\n inputs, labels, lengths = magenta.common.get_padded_batch(\n sequence_example_file_paths, hparams.batch_size, input_size)\n\n elif mode == 'generate':\n inputs = tf.placeholder(tf.float32, [hparams.batch_size, None,\n input_size])\n\n cell = make_rnn_cell(\n hparams.rnn_layer_sizes,\n dropout_keep_prob=(\n 1.0 if mode == 'generate' else hparams.dropout_keep_prob),\n attn_length=hparams.attn_length)\n\n initial_state = cell.zero_state(hparams.batch_size, tf.float32)\n\n outputs, final_state = tf.nn.dynamic_rnn(\n cell, inputs, initial_state=initial_state, swap_memory=True)\n\n outputs_flat = tf.reshape(outputs, [-1, cell.output_size])\n logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)\n\n if mode == 'train' or mode == 'eval':\n labels_flat = tf.reshape(labels, [-1])\n mask = tf.sequence_mask(lengths)\n if hparams.skip_first_n_losses:\n skip = tf.minimum(lengths, hparams.skip_first_n_losses)\n skip_mask = tf.sequence_mask(skip, maxlen=tf.reduce_max(lengths))\n mask = tf.logical_and(mask, tf.logical_not(skip_mask))\n mask = tf.cast(mask, tf.float32)\n mask_flat = tf.reshape(mask, [-1])\n\n num_logits = tf.to_float(tf.reduce_sum(lengths))\n\n with tf.control_dependencies(\n [tf.Assert(tf.greater(num_logits, 0.), [num_logits])]):\n softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels_flat, logits=logits_flat)\n loss = tf.reduce_sum(mask_flat * softmax_cross_entropy) / num_logits\n perplexity = (tf.reduce_sum(mask_flat * tf.exp(softmax_cross_entropy)) /\n num_logits)\n\n correct_predictions = tf.to_float(\n tf.nn.in_top_k(logits_flat, labels_flat, 1)) * mask_flat\n accuracy = tf.reduce_sum(correct_predictions) / num_logits * 100\n\n event_positions = (\n tf.to_float(tf.not_equal(labels_flat, no_event_label)) * mask_flat)\n event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, event_positions)) /\n tf.reduce_sum(event_positions) * 100)\n\n no_event_positions = (\n tf.to_float(tf.equal(labels_flat, no_event_label)) * mask_flat)\n no_event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, no_event_positions)) /\n tf.reduce_sum(no_event_positions) * 100)\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('perplexity', perplexity)\n tf.add_to_collection('accuracy', accuracy)\n tf.add_to_collection('global_step', global_step)\n\n summaries = [\n tf.summary.scalar('loss', loss),\n tf.summary.scalar('perplexity', perplexity),\n tf.summary.scalar('accuracy', accuracy),\n tf.summary.scalar(\n 'event_accuracy', event_accuracy),\n tf.summary.scalar(\n 'no_event_accuracy', no_event_accuracy),\n ]\n\n if mode == 'train':\n learning_rate = tf.train.exponential_decay(\n hparams.initial_learning_rate, global_step, hparams.decay_steps,\n hparams.decay_rate, staircase=True, name='learning_rate')\n\n opt = tf.train.AdamOptimizer(learning_rate)\n params = tf.trainable_variables()\n gradients = tf.gradients(loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients,\n hparams.clip_norm)\n train_op = 
opt.apply_gradients(zip(clipped_gradients, params),\n global_step)\n tf.add_to_collection('learning_rate', learning_rate)\n tf.add_to_collection('train_op', train_op)\n\n summaries.append(tf.summary.scalar(\n 'learning_rate', learning_rate))\n\n if mode == 'eval':\n summary_op = tf.summary.merge(summaries)\n tf.add_to_collection('summary_op', summary_op)\n\n elif mode == 'generate':\n temperature = tf.placeholder(tf.float32, [])\n softmax_flat = tf.nn.softmax(\n tf.div(logits_flat, tf.fill([num_classes], temperature)))\n softmax = tf.reshape(softmax_flat, [hparams.batch_size, -1, num_classes])\n\n tf.add_to_collection('inputs', inputs)\n tf.add_to_collection('initial_state', initial_state)\n tf.add_to_collection('final_state', final_state)\n tf.add_to_collection('temperature', temperature)\n tf.add_to_collection('softmax', softmax)\n\n return graph\n", "path": "magenta/models/shared/events_rnn_graph.py"}]}
| 3,039 | 119 |
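The error in this record comes from creating `tf.contrib.rnn.AttentionCellWrapper` without `state_is_tuple`, while the wrapped `MultiRNNCell` of `BasicLSTMCell`s returns `LSTMStateTuple` states. The sketch below is essentially the corrected `make_rnn_cell` from the record's patched file, trimmed down; it assumes a TensorFlow 1.x environment where `tf.contrib.rnn` is available, and the function name `make_attention_cell` is illustrative.

```python
import tensorflow as tf  # TensorFlow 1.x, where tf.contrib.rnn exists


def make_attention_cell(rnn_layer_sizes, attn_length, dropout_keep_prob=1.0):
    # Each BasicLSTMCell exposes an LSTMStateTuple, so the stacked cell's
    # state is a tuple of tuples.
    cells = []
    for num_units in rnn_layer_sizes:
        cell = tf.contrib.rnn.BasicLSTMCell(num_units)
        cell = tf.contrib.rnn.DropoutWrapper(
            cell, output_keep_prob=dropout_keep_prob)
        cells.append(cell)

    cell = tf.contrib.rnn.MultiRNNCell(cells)
    if attn_length:
        # state_is_tuple=True makes the wrapper accept tuple states instead of
        # expecting a single concatenated state tensor, avoiding the ValueError
        # shown in the traceback above.
        cell = tf.contrib.rnn.AttentionCellWrapper(
            cell, attn_length, state_is_tuple=True)
    return cell
```

Passing the flag explicitly also keeps the construction working on contrib releases where the wrapper's default was still `False`, as in the traceback above.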
gh_patches_debug_25624
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-8312
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] ‘from_networkx’ function for a fixed layout
# Summary
Currently (ver 0.13.0), Bokeh's `from_networkx` function always requires a layout function.
Personally I think that it is more useful to be able to convert a graph structure using **a fixed layout** as well as layout functions.
# Detail
## Use Cases
I think that this feature is particularly useful in the following scenes.
(A) The case where **the user wants to specify a fixed layout that customizes the position by oneself** instead of using a layout function.
- For example, a transportation network associating nodes with geographical positions.
(B) The case where **the user wants to calculate a layout before calling `from_networkx`** since the calculation time is too long.
- Currently, layout is calculated every time `from_networkx` is called. Therefore, it takes time to try to recreate `GraphRenderer` many times. It will become a large problem in the case of a large graph.
I think that it’s a general case to use a fixed layout.
## Workaround
I currently handle a fixed layout by the following methods. But they are **NOT** fundamental solutions.
1. Specify a layout function (eg `nx.spring_layout function`, etc.) as **a dummy** and then update the `layout_provider` attribute with a fixed layout.
- [Code Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/networkx2bokeh_layout_for_custom_position.ipynb)
2. Prepare a function that returns a fixed layout (dict) and pass it in the argument `layout_function`.
- [Code Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/use_function_return_layout.ipynb#Workaround)
### Problem
Workaround (1) solves use case (A), but it does **NOT** solve use case (B).
It seems that workaround (2) can solve both use case (A) and (B), **BUT** I think that it’s a bother.
Therefore I hope for a fundamental solution.
# Expected Result
1. Be able to specify a fixed layout in `from_networkx`
2. Or, be able to use `from_networkx` without a layout function
# Solution
I imagine there are several methods of implementation as below, but I don't really have a strong opinion.
If Bokeh has an API design policy and future vision, I would like to follow them.
## Before
https://github.com/bokeh/bokeh/blob/e46ef320ff33be0b64be9d8fbd2eea2ad86aa24c/bokeh/models/graphs.py#L35-L109
## After
### Approach 1. Add a new function for graphs with a fixed layout
- Add a new function apart from the existing `from_networkx`.
- The difference from `from_networkx` is that it is passed a fixed layout (dict) as an argument.
[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-1.-Add-a-new-function-for-graphs-with-fixed-layout)
### Approach 2. Update `from_networkx` to handle both fixed layout and layout function
- Rename `layout_function` to `layout` and allow to receive both dictionary and function.
- If `layout` is a function, execute the function and calculate the layout. Otherwise, set it directly to `graph_renderer.layout_provider` in `from_networkx`.
- It is **NOT** backward compatible with 0.13.0 since the argument name changes.
[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-2.-Update-from_networkx-to-handle-both-fixed-layout-and-layout-function)
### Approach 3. Update `from_networkx` so that it can be called without a layout function
- Make `layout_function` an optional argument.
- The user manually sets a fixed layout after calling `from_networkx`.
[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-3.-Update-from_networkx-so-that--it-can-be-called-without-a-layout-function)
## Or Other Approaches
Other people may have better ideas.
I'm often bothered by this problem, so I'm interested in sending a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/graphs.py`
Content:
```
1 from ..core.has_props import abstract
2 from ..core.properties import Any, Dict, Either, Int, Seq, String
3 from ..model import Model
4 from ..models.sources import ColumnDataSource
5
6
7 @abstract
8 class LayoutProvider(Model):
9 '''
10
11 '''
12
13 pass
14
15
16 class StaticLayoutProvider(LayoutProvider):
17 '''
18
19 '''
20
21 graph_layout = Dict(Either(String, Int), Seq(Any), default={}, help="""
22 The coordinates of the graph nodes in cartesian space. The dictionary
23 keys correspond to a node index and the values are a two element sequence
24 containing the x and y coordinates of the node.
25
26 .. code-block:: python
27
28 {
29 0 : [0.5, 0.5],
30 1 : [1.0, 0.86],
31 2 : [0.86, 1],
32 }
33 """)
34
35 def from_networkx(graph, layout_function, **kwargs):
36 '''
37 Generate a GraphRenderer from a networkx.Graph object and networkx
38 layout function. Any keyword arguments will be passed to the
39 layout function.
40
41 Args:
42 graph (networkx.Graph) : a networkx graph to render
43 layout_function (function) : a networkx layout function
44
45 Returns:
46 instance (GraphRenderer)
47
48 .. warning::
49 Only two dimensional layouts are currently supported.
50
51 .. warning::
52 Node attributes labeled 'index' and edge attributes labeled 'start' or 'end' are ignored.
53 If you want to convert these attributes, please re-label them to other names.
54
55 '''
56
57 # inline import to prevent circular imports
58 from ..models.renderers import GraphRenderer
59 from ..models.graphs import StaticLayoutProvider
60
61 # Handles nx 1.x vs 2.x data structure change
62 # Convert node attributes
63 node_dict = dict()
64 node_attr_keys = [attr_key for node in list(graph.nodes(data=True))
65 for attr_key in node[1].keys()]
66 node_attr_keys = list(set(node_attr_keys))
67
68 for attr_key in node_attr_keys:
69 node_dict[attr_key] = [node_attr[attr_key] if attr_key in node_attr.keys() else None
70 for _, node_attr
71 in graph.nodes(data=True)]
72
73 if 'index' in node_attr_keys:
74 from warnings import warn
75 warn("Converting node attributes labeled 'index' are skipped. "
76 "If you want to convert these attributes, please re-label with other names.")
77
78 node_dict['index'] = list(graph.nodes())
79
80 # Convert edge attributes
81 edge_dict = dict()
82 edge_attr_keys = [attr_key for edge in graph.edges(data=True)
83 for attr_key in edge[2].keys()]
84 edge_attr_keys = list(set(edge_attr_keys))
85
86 for attr_key in edge_attr_keys:
87 edge_dict[attr_key] = [edge_attr[attr_key] if attr_key in edge_attr.keys() else None
88 for _, _, edge_attr
89 in graph.edges(data=True)]
90
91 if 'start' in edge_attr_keys or 'end' in edge_attr_keys:
92 from warnings import warn
93 warn("Converting edge attributes labeled 'start' or 'end' are skipped. "
94 "If you want to convert these attributes, please re-label them with other names.")
95
96 edge_dict['start'] = [x[0] for x in graph.edges()]
97 edge_dict['end'] = [x[1] for x in graph.edges()]
98
99 node_source = ColumnDataSource(data=node_dict)
100 edge_source = ColumnDataSource(data=edge_dict)
101
102 graph_renderer = GraphRenderer()
103 graph_renderer.node_renderer.data_source.data = node_source.data
104 graph_renderer.edge_renderer.data_source.data = edge_source.data
105
106 graph_layout = layout_function(graph, **kwargs)
107 graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
108
109 return graph_renderer
110
111
112 @abstract
113 class GraphHitTestPolicy(Model):
114 '''
115
116 '''
117
118 pass
119
120
121 class NodesOnly(GraphHitTestPolicy):
122 '''
123 With the NodesOnly policy, only graph nodes are able to be selected and
124 inspected. There is no selection or inspection of graph edges.
125
126 '''
127
128 pass
129
130 class NodesAndLinkedEdges(GraphHitTestPolicy):
131 '''
132 With the NodesAndLinkedEdges policy, inspection or selection of graph
133 nodes will result in the inspection or selection of the node and of the
134 linked graph edges. There is no direct selection or inspection of graph
135 edges.
136
137 '''
138
139 pass
140
141 class EdgesAndLinkedNodes(GraphHitTestPolicy):
142 '''
143 With the EdgesAndLinkedNodes policy, inspection or selection of graph
144 edges will result in the inspection or selection of the edge and of the
145 linked graph nodes. There is no direct selection or inspection of graph
146 nodes.
147
148 '''
149
150 pass
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/models/graphs.py b/bokeh/models/graphs.py
--- a/bokeh/models/graphs.py
+++ b/bokeh/models/graphs.py
@@ -40,7 +40,8 @@
Args:
graph (networkx.Graph) : a networkx graph to render
- layout_function (function) : a networkx layout function
+ layout_function (function or dict) : a networkx layout function or mapping of node keys to positions.
+ The position is a two element sequence containing the x and y coordinate.
Returns:
instance (GraphRenderer)
@@ -103,7 +104,17 @@
graph_renderer.node_renderer.data_source.data = node_source.data
graph_renderer.edge_renderer.data_source.data = edge_source.data
- graph_layout = layout_function(graph, **kwargs)
+ if callable(layout_function):
+ graph_layout = layout_function(graph, **kwargs)
+ else:
+ graph_layout = layout_function
+
+ node_keys = graph_renderer.node_renderer.data_source.data['index']
+ if set(node_keys) != set(layout_function.keys()):
+ from warnings import warn
+ warn("Node keys in 'layout_function' don't match node keys in the graph. "
+ "These nodes may not be displayed correctly.")
+
graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
return graph_renderer
|
{"golden_diff": "diff --git a/bokeh/models/graphs.py b/bokeh/models/graphs.py\n--- a/bokeh/models/graphs.py\n+++ b/bokeh/models/graphs.py\n@@ -40,7 +40,8 @@\n \n Args:\n graph (networkx.Graph) : a networkx graph to render\n- layout_function (function) : a networkx layout function\n+ layout_function (function or dict) : a networkx layout function or mapping of node keys to positions.\n+ The position is a two element sequence containing the x and y coordinate.\n \n Returns:\n instance (GraphRenderer)\n@@ -103,7 +104,17 @@\n graph_renderer.node_renderer.data_source.data = node_source.data\n graph_renderer.edge_renderer.data_source.data = edge_source.data\n \n- graph_layout = layout_function(graph, **kwargs)\n+ if callable(layout_function):\n+ graph_layout = layout_function(graph, **kwargs)\n+ else:\n+ graph_layout = layout_function\n+\n+ node_keys = graph_renderer.node_renderer.data_source.data['index']\n+ if set(node_keys) != set(layout_function.keys()):\n+ from warnings import warn\n+ warn(\"Node keys in 'layout_function' don't match node keys in the graph. \"\n+ \"These nodes may not be displayed correctly.\")\n+\n graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)\n \n return graph_renderer\n", "issue": "[Feature Request] \u2018from_networkx\u2019 function for a fixed layout\n# Summary\r\nCurrently\u00a0(ver 0.13.0)\u00a0Bokeh,\u00a0`from_networkx`\u00a0function always requests a layout function. \r\nPersonally I think that it is more useful to be able to convert a graph structure using **a fixed layout** as well as layout functions.\r\n\r\n# Detail\r\n## Use Cases\r\nI think that this feature is particularly useful in the following scenes.\r\n\r\n(A) The case where **the user wants to specify a fixed layout that customizes the position by oneself** instead of using a layout function.\r\n- For example, a transportation network associating nodes with geographical positions.\r\n\r\n(B) The case where **the user wants to calculate a layout before calling `from_networkx`** since the calculation time is too long.\t\r\n - Currently, layout is calculated every time `from_networkx` is called. Therefore, it takes time to try to recreate `GraphRenderer` many times. It will become a large problem in the case of a large graph.\r\n\r\nI think that it\u2019s a general case to use a fixed layout.\r\n\r\n## Workaround\r\nI currently handle a fixed layout by the following methods. But they are **NOT** fundamental solutions.\r\n\r\n1. Specify a layout function\u00a0(eg `nx.spring_layout\u00a0function`,\u00a0etc.) as **a dummy**\u00a0and then update the `layout_provider` attribute with a fixed layout.\r\n - [Code Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/networkx2bokeh_layout_for_custom_position.ipynb)\r\n2. Prepare a function that returns a fixed layout (dict) and pass it in the argument `layout_function`.\r\n - [Code Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/use_function_return_layout.ipynb#Workaround)\r\n\r\n### Problem\r\nWorkaround (1) solves use case (A), but it does **NOT** solve use case (B).\r\nIt seems that workaround (2) can solve both use case (A) and (B), **BUT** I think that it\u2019s a bother.\r\nTherefore I hope for a fundamental solution.\r\n\r\n# Expected Result\r\n1. Be able to specify a fixed layout in `from_networkx`\r\n2. 
Or, be able to use `from_networkx` without a layout function\r\n\r\n# Solution\r\nI imagine there are several methods of implementation as below, but I don't really have a strong opinion.\r\nIf Bokeh has an API design policy and future vision, I would like to follow them.\r\n\r\n## Before\r\nhttps://github.com/bokeh/bokeh/blob/e46ef320ff33be0b64be9d8fbd2eea2ad86aa24c/bokeh/models/graphs.py#L35-L109\r\n\r\n## After\r\n### Approach 1. Add a new function for graphs with a fixed layout\r\n- Add a new function apart from the existing `from_networkx`.\r\n- The difference from `from_networkx` is that it is passed a fixed layout (dict) as an argument.\r\n\r\n[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-1.-Add-a-new-function-for-graphs-with-fixed-layout)\r\n\r\n### Approach 2. Update `from_networkx` to handle both fixed layout and layout function\r\n- Rename `layout_function` to `layout` and allow to receive both dictionary and function.\r\n- If `layout` is a function, execute the function and calculate the layout. Otherwise, set it directly to `graph_renderer.layout_provider` in `from_networkx`.\r\n- It is **NOT** backward compatible with 0.13.0 since the argument name changes.\r\n\r\n[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-2.-Update-from_networkx-to-handle-both-fixed-layout-and-layout-function)\r\n\r\n### Approach 3. Update `from_networkx` so that it can be called without a layout function\r\n- Make `layout_function` an optional argument.\r\n- The user manually sets a fixed layout after calling `from_networkx`. \r\n\r\n[Code & Usage Example](http://nbviewer.jupyter.org/github/komo-fr/networkx2bokeh_note/blob/master/notebooks/draft_from_networkx_for_fixed_layout.ipynb#Approach-3.-Update-from_networkx-so-that--it-can-be-called-without-a-layout-function)\r\n\r\n## Or Other Approaches\r\nOther people may have better ideas.\r\n\r\nI'm often bothered by this problem, so I'm interested in sending a PR.\n", "before_files": [{"content": "from ..core.has_props import abstract\nfrom ..core.properties import Any, Dict, Either, Int, Seq, String\nfrom ..model import Model\nfrom ..models.sources import ColumnDataSource\n\n\n@abstract\nclass LayoutProvider(Model):\n '''\n\n '''\n\n pass\n\n\nclass StaticLayoutProvider(LayoutProvider):\n '''\n\n '''\n\n graph_layout = Dict(Either(String, Int), Seq(Any), default={}, help=\"\"\"\n The coordinates of the graph nodes in cartesian space. The dictionary\n keys correspond to a node index and the values are a two element sequence\n containing the x and y coordinates of the node.\n\n .. code-block:: python\n\n {\n 0 : [0.5, 0.5],\n 1 : [1.0, 0.86],\n 2 : [0.86, 1],\n }\n \"\"\")\n\ndef from_networkx(graph, layout_function, **kwargs):\n '''\n Generate a GraphRenderer from a networkx.Graph object and networkx\n layout function. Any keyword arguments will be passed to the\n layout function.\n\n Args:\n graph (networkx.Graph) : a networkx graph to render\n layout_function (function) : a networkx layout function\n\n Returns:\n instance (GraphRenderer)\n\n .. warning::\n Only two dimensional layouts are currently supported.\n\n .. 
warning::\n Node attributes labeled 'index' and edge attributes labeled 'start' or 'end' are ignored.\n If you want to convert these attributes, please re-label them to other names.\n\n '''\n\n # inline import to prevent circular imports\n from ..models.renderers import GraphRenderer\n from ..models.graphs import StaticLayoutProvider\n\n # Handles nx 1.x vs 2.x data structure change\n # Convert node attributes\n node_dict = dict()\n node_attr_keys = [attr_key for node in list(graph.nodes(data=True))\n for attr_key in node[1].keys()]\n node_attr_keys = list(set(node_attr_keys))\n\n for attr_key in node_attr_keys:\n node_dict[attr_key] = [node_attr[attr_key] if attr_key in node_attr.keys() else None\n for _, node_attr\n in graph.nodes(data=True)]\n\n if 'index' in node_attr_keys:\n from warnings import warn\n warn(\"Converting node attributes labeled 'index' are skipped. \"\n \"If you want to convert these attributes, please re-label with other names.\")\n\n node_dict['index'] = list(graph.nodes())\n\n # Convert edge attributes\n edge_dict = dict()\n edge_attr_keys = [attr_key for edge in graph.edges(data=True)\n for attr_key in edge[2].keys()]\n edge_attr_keys = list(set(edge_attr_keys))\n\n for attr_key in edge_attr_keys:\n edge_dict[attr_key] = [edge_attr[attr_key] if attr_key in edge_attr.keys() else None\n for _, _, edge_attr\n in graph.edges(data=True)]\n\n if 'start' in edge_attr_keys or 'end' in edge_attr_keys:\n from warnings import warn\n warn(\"Converting edge attributes labeled 'start' or 'end' are skipped. \"\n \"If you want to convert these attributes, please re-label them with other names.\")\n\n edge_dict['start'] = [x[0] for x in graph.edges()]\n edge_dict['end'] = [x[1] for x in graph.edges()]\n\n node_source = ColumnDataSource(data=node_dict)\n edge_source = ColumnDataSource(data=edge_dict)\n\n graph_renderer = GraphRenderer()\n graph_renderer.node_renderer.data_source.data = node_source.data\n graph_renderer.edge_renderer.data_source.data = edge_source.data\n\n graph_layout = layout_function(graph, **kwargs)\n graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)\n\n return graph_renderer\n\n\n@abstract\nclass GraphHitTestPolicy(Model):\n '''\n\n '''\n\n pass\n\n\nclass NodesOnly(GraphHitTestPolicy):\n '''\n With the NodesOnly policy, only graph nodes are able to be selected and\n inspected. There is no selection or inspection of graph edges.\n\n '''\n\n pass\n\nclass NodesAndLinkedEdges(GraphHitTestPolicy):\n '''\n With the NodesAndLinkedEdges policy, inspection or selection of graph\n nodes will result in the inspection or selection of the node and of the\n linked graph edges. There is no direct selection or inspection of graph\n edges.\n\n '''\n\n pass\n\nclass EdgesAndLinkedNodes(GraphHitTestPolicy):\n '''\n With the EdgesAndLinkedNodes policy, inspection or selection of graph\n edges will result in the inspection or selection of the edge and of the\n linked graph nodes. 
There is no direct selection or inspection of graph\n nodes.\n\n '''\n\n pass\n", "path": "bokeh/models/graphs.py"}], "after_files": [{"content": "from ..core.has_props import abstract\nfrom ..core.properties import Any, Dict, Either, Int, Seq, String\nfrom ..model import Model\nfrom ..models.sources import ColumnDataSource\n\n\n@abstract\nclass LayoutProvider(Model):\n '''\n\n '''\n\n pass\n\n\nclass StaticLayoutProvider(LayoutProvider):\n '''\n\n '''\n\n graph_layout = Dict(Either(String, Int), Seq(Any), default={}, help=\"\"\"\n The coordinates of the graph nodes in cartesian space. The dictionary\n keys correspond to a node index and the values are a two element sequence\n containing the x and y coordinates of the node.\n\n .. code-block:: python\n\n {\n 0 : [0.5, 0.5],\n 1 : [1.0, 0.86],\n 2 : [0.86, 1],\n }\n \"\"\")\n\ndef from_networkx(graph, layout_function, **kwargs):\n '''\n Generate a GraphRenderer from a networkx.Graph object and networkx\n layout function. Any keyword arguments will be passed to the\n layout function.\n\n Args:\n graph (networkx.Graph) : a networkx graph to render\n layout_function (function or dict) : a networkx layout function or mapping of node keys to positions.\n The position is a two element sequence containing the x and y coordinate.\n\n Returns:\n instance (GraphRenderer)\n\n .. warning::\n Only two dimensional layouts are currently supported.\n\n .. warning::\n Node attributes labeled 'index' and edge attributes labeled 'start' or 'end' are ignored.\n If you want to convert these attributes, please re-label them to other names.\n\n '''\n\n # inline import to prevent circular imports\n from ..models.renderers import GraphRenderer\n from ..models.graphs import StaticLayoutProvider\n\n # Handles nx 1.x vs 2.x data structure change\n # Convert node attributes\n node_dict = dict()\n node_attr_keys = [attr_key for node in list(graph.nodes(data=True))\n for attr_key in node[1].keys()]\n node_attr_keys = list(set(node_attr_keys))\n\n for attr_key in node_attr_keys:\n node_dict[attr_key] = [node_attr[attr_key] if attr_key in node_attr.keys() else None\n for _, node_attr\n in graph.nodes(data=True)]\n\n if 'index' in node_attr_keys:\n from warnings import warn\n warn(\"Converting node attributes labeled 'index' are skipped. \"\n \"If you want to convert these attributes, please re-label with other names.\")\n\n node_dict['index'] = list(graph.nodes())\n\n # Convert edge attributes\n edge_dict = dict()\n edge_attr_keys = [attr_key for edge in graph.edges(data=True)\n for attr_key in edge[2].keys()]\n edge_attr_keys = list(set(edge_attr_keys))\n\n for attr_key in edge_attr_keys:\n edge_dict[attr_key] = [edge_attr[attr_key] if attr_key in edge_attr.keys() else None\n for _, _, edge_attr\n in graph.edges(data=True)]\n\n if 'start' in edge_attr_keys or 'end' in edge_attr_keys:\n from warnings import warn\n warn(\"Converting edge attributes labeled 'start' or 'end' are skipped. 
\"\n \"If you want to convert these attributes, please re-label them with other names.\")\n\n edge_dict['start'] = [x[0] for x in graph.edges()]\n edge_dict['end'] = [x[1] for x in graph.edges()]\n\n node_source = ColumnDataSource(data=node_dict)\n edge_source = ColumnDataSource(data=edge_dict)\n\n graph_renderer = GraphRenderer()\n graph_renderer.node_renderer.data_source.data = node_source.data\n graph_renderer.edge_renderer.data_source.data = edge_source.data\n\n if callable(layout_function):\n graph_layout = layout_function(graph, **kwargs)\n else:\n graph_layout = layout_function\n\n node_keys = graph_renderer.node_renderer.data_source.data['index']\n if set(node_keys) != set(layout_function.keys()):\n from warnings import warn\n warn(\"Node keys in 'layout_function' don't match node keys in the graph. \"\n \"These nodes may not be displayed correctly.\")\n\n graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)\n\n return graph_renderer\n\n\n@abstract\nclass GraphHitTestPolicy(Model):\n '''\n\n '''\n\n pass\n\n\nclass NodesOnly(GraphHitTestPolicy):\n '''\n With the NodesOnly policy, only graph nodes are able to be selected and\n inspected. There is no selection or inspection of graph edges.\n\n '''\n\n pass\n\nclass NodesAndLinkedEdges(GraphHitTestPolicy):\n '''\n With the NodesAndLinkedEdges policy, inspection or selection of graph\n nodes will result in the inspection or selection of the node and of the\n linked graph edges. There is no direct selection or inspection of graph\n edges.\n\n '''\n\n pass\n\nclass EdgesAndLinkedNodes(GraphHitTestPolicy):\n '''\n With the EdgesAndLinkedNodes policy, inspection or selection of graph\n edges will result in the inspection or selection of the edge and of the\n linked graph nodes. There is no direct selection or inspection of graph\n nodes.\n\n '''\n\n pass\n", "path": "bokeh/models/graphs.py"}]}
| 2,663 | 308 |
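With the patch in this record, the second argument of `from_networkx` may be either a layout callable or a precomputed mapping of node keys to positions. A short usage sketch follows, assuming networkx 2.x and a Bokeh build containing the patched `bokeh.models.graphs` module; the graph and variable names are just examples.

```python
import networkx as nx
from bokeh.models.graphs import from_networkx

G = nx.karate_club_graph()

# Style 1: pass a layout function; from_networkx calls it as layout(G, **kwargs).
renderer = from_networkx(G, nx.spring_layout, scale=2)

# Style 2 (new): pass a fixed, precomputed layout; it is used as-is, so the
# potentially expensive layout computation only happens once.
fixed_layout = {node: [float(x), float(y)]
                for node, (x, y) in nx.circular_layout(G).items()}
renderer = from_networkx(G, fixed_layout)
```

This covers both use case (A), hand-placed node positions, and use case (B), layouts computed once up front, described in the issue above.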
gh_patches_debug_25404
|
rasdani/github-patches
|
git_diff
|
openfun__richie-1527
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
og:image is not set properly
## Bug Report
**Expected behavior/code**
For SEO purposes, we add Open Graph tags to CMS pages. These tags should be properly configured.
**Actual Behavior**
On CMS pages, the `og:image` URL is malformed.
**Steps to Reproduce**
1. Go to a CMS Page
2. Use [facebook debug tool](https://developers.facebook.com/tools/debug/) to check link preview for the CMS Page
3. Note that `og:image` tag is not valid
**Environment**
- Richie version: 2.8.2
- Platform: Mac OS 12.0.1 - Firefox 93.0
**Possible Solution**
- Fix the `og:image` attribute
**Additional context/Screenshots**
- https://developers.facebook.com/tools/debug/?q=https%3A%2F%2Fwww.fun-mooc.fr%2Ffr%2Fcours%2Fmooc-elles-font-lart%2F
- https://developers.facebook.com/tools/debug/?q=https%3A%2F%2Fwww.fun-mooc.fr%2Ffr%2Factualites%2Fdeveloppement-des-territoires-comment-agir-pour-construire-laven%2F
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/richie/apps/core/context_processors.py`
Content:
```
1 """
2 Template context processors
3 """
4 import json
5 from collections import OrderedDict
6
7 from django.conf import settings
8 from django.contrib.sites.models import Site
9 from django.http.request import HttpRequest
10 from django.middleware.csrf import get_token
11 from django.utils.translation import get_language_from_request
12
13 from richie.apps.courses.defaults import RICHIE_MAX_ARCHIVED_COURSE_RUNS
14 from richie.apps.courses.models import Organization
15
16 from . import defaults
17
18
19 def site_metas(request: HttpRequest):
20 """
21 Context processor to add all information required by Richie CMS templates and frontend.
22
23 If `CDN_DOMAIN` settings is defined we add it in the context. It allows
24 to load statics js on a CDN like cloudfront.
25 """
26 site_current = Site.objects.get_current()
27 protocol = "https" if request.is_secure() else "http"
28
29 context = {
30 **{
31 f"GLIMPSE_PAGINATION_{k.upper()}": v
32 for k, v in {
33 **defaults.GLIMPSE_PAGINATION,
34 **getattr(settings, "RICHIE_GLIMPSE_PAGINATION", {}),
35 }.items()
36 },
37 "SITE": {
38 "name": site_current.name,
39 "domain": site_current.domain,
40 "web_url": f"{protocol:s}://{site_current.domain:s}",
41 },
42 "FRONTEND_CONTEXT": {
43 "context": {
44 "csrftoken": get_token(request),
45 "environment": getattr(settings, "ENVIRONMENT", ""),
46 "release": getattr(settings, "RELEASE", ""),
47 "sentry_dsn": getattr(settings, "SENTRY_DSN", ""),
48 }
49 },
50 **WebAnalyticsContextProcessor().context_processor(request),
51 }
52
53 if getattr(settings, "CDN_DOMAIN", None):
54 context["CDN_DOMAIN"] = settings.CDN_DOMAIN
55
56 # Add a MEDIA_URL_PREFIX to context to prefix the media url files to have an absolute URL
57 if settings.MEDIA_URL.startswith("//"):
58 # Eg. //my-cdn-user.cdn-provider.com/media/
59 context["MEDIA_URL_PREFIX"] = f"{request.scheme:s}:"
60 elif settings.MEDIA_URL.startswith("/"):
61 # Eg. /media/
62 context["MEDIA_URL_PREFIX"] = f"{protocol:s}://{site_current.domain:s}"
63 else:
64 # Eg. https://my-cdn-user.cdn-provider.com/media/
65 context["MEDIA_URL_PREFIX"] = ""
66
67 authentication_delegation = getattr(
68 settings, "RICHIE_AUTHENTICATION_DELEGATION", None
69 )
70 if authentication_delegation:
71
72 context["AUTHENTICATION"] = {
73 "profile_urls": json.dumps(
74 {
75 key: {
76 "label": str(url["label"]),
77 "action": str(
78 url["href"].format(
79 base_url=authentication_delegation["BASE_URL"]
80 )
81 ),
82 }
83 for key, url in authentication_delegation.get(
84 "PROFILE_URLS", {}
85 ).items()
86 }
87 ),
88 }
89
90 context["FRONTEND_CONTEXT"]["context"]["authentication"] = {
91 "endpoint": authentication_delegation["BASE_URL"],
92 "backend": authentication_delegation["BACKEND"],
93 }
94
95 if getattr(settings, "RICHIE_LMS_BACKENDS", None):
96 context["FRONTEND_CONTEXT"]["context"]["lms_backends"] = [
97 {
98 "endpoint": lms["BASE_URL"],
99 "backend": lms["JS_BACKEND"],
100 "course_regexp": lms["JS_COURSE_REGEX"],
101 }
102 for lms in getattr(settings, "RICHIE_LMS_BACKENDS", [])
103 ]
104
105 context["FRONTEND_CONTEXT"] = json.dumps(context["FRONTEND_CONTEXT"])
106
107 if getattr(settings, "RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT", None):
108 context[
109 "RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT"
110 ] = settings.RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT
111
112 context["RICHIE_MAX_ARCHIVED_COURSE_RUNS"] = getattr(
113 settings, "RICHIE_MAX_ARCHIVED_COURSE_RUNS", RICHIE_MAX_ARCHIVED_COURSE_RUNS
114 )
115
116 return context
117
118
119 class WebAnalyticsContextProcessor:
120 """
121 Context processor to add Web Analytics tracking information to Richie CMS templates and
122 frontend.
123 """
124
125 def context_processor(self, request: HttpRequest) -> dict:
126 """
127 Real implementation of the context processor for the Web Analytics core app sub-module
128 """
129 context = {}
130 if hasattr(request, "current_page"):
131 # load web analytics settings to the context
132 if getattr(settings, "WEB_ANALYTICS_ID", None):
133 context["WEB_ANALYTICS_ID"] = settings.WEB_ANALYTICS_ID
134 context["WEB_ANALYTICS_DIMENSIONS"] = self.get_dimensions(request)
135
136 context["WEB_ANALYTICS_LOCATION"] = getattr(
137 settings, "WEB_ANALYTICS_LOCATION", "head"
138 )
139
140 context["WEB_ANALYTICS_PROVIDER"] = getattr(
141 settings, "WEB_ANALYTICS_PROVIDER", "google_analytics"
142 )
143 return context
144
145 # pylint: disable=no-self-use
146 def get_dimensions(self, request: HttpRequest) -> dict:
147 """
148 Compute the web analytics dimensions (dict) that would be added to the Django context
149 They are a dictionary like:
150 ```
151 {
152 "organizations_codes": ["UNIV_LISBON", "UNIV_PORTO"],
153 "course_code": ["COURSE_XPTO"],
154 "course_runs_titles": [
155 "Summer edition",
156 "Winter edition"
157 ],
158 "course_runs_resource_links": [
159 "http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course/info",
160 "http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course_2/info"
161 ],
162 "page_title": ["Introduction to Programming"],
163 }
164 ```
165
166 Args:
167 request (HttpRequest): The Http request
168 web_analytics_context (dict): the context relevant for the web analytics sub module
169
170 Returns:
171 dict: a dict with the dimensions, where each value is a list
172 """
173 # Warn do not change the order of evaluation of this methods, because on Google Analytics
174 # the key is a 'dimension1' instead of a more generic key like 'organizations'.
175 # That's why we are using an OrderedDict instead of a normal Python dict (don't support
176 # ordering)
177 dimensions = OrderedDict()
178
179 page = request.current_page or None
180 language = get_language_from_request(request, check_path=True)
181
182 organizations_codes = []
183 if page and not page.is_home:
184 organizations_codes = Organization.get_organizations_codes(page, language)
185 dimensions["organizations_codes"] = organizations_codes
186
187 course = getattr(page, "course", None)
188 dimensions["course_code"] = [getattr(course, "code", "")]
189
190 course_runs = course.course_runs if course else []
191 dimensions["course_runs_titles"] = [
192 course_run.title
193 for course_run in course_runs
194 if course_run is not None and course_run.safe_title is not None
195 ]
196
197 dimensions["course_runs_resource_links"] = map(
198 lambda course_run: course_run.resource_link, course_runs
199 )
200
201 dimensions["page_title"] = [page.get_title() if page else ""]
202 return dimensions
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/richie/apps/core/context_processors.py b/src/richie/apps/core/context_processors.py
--- a/src/richie/apps/core/context_processors.py
+++ b/src/richie/apps/core/context_processors.py
@@ -6,6 +6,7 @@
from django.conf import settings
from django.contrib.sites.models import Site
+from django.core.files.storage import get_storage_class
from django.http.request import HttpRequest
from django.middleware.csrf import get_token
from django.utils.translation import get_language_from_request
@@ -53,11 +54,12 @@
if getattr(settings, "CDN_DOMAIN", None):
context["CDN_DOMAIN"] = settings.CDN_DOMAIN
+ storage_url = get_storage_class()().url("any-page")
# Add a MEDIA_URL_PREFIX to context to prefix the media url files to have an absolute URL
- if settings.MEDIA_URL.startswith("//"):
+ if storage_url.startswith("//"):
# Eg. //my-cdn-user.cdn-provider.com/media/
context["MEDIA_URL_PREFIX"] = f"{request.scheme:s}:"
- elif settings.MEDIA_URL.startswith("/"):
+ elif storage_url.startswith("/"):
# Eg. /media/
context["MEDIA_URL_PREFIX"] = f"{protocol:s}://{site_current.domain:s}"
else:
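For context, a minimal standalone sketch of the prefix logic this patch introduces: instead of trusting `settings.MEDIA_URL`, it asks the configured storage backend what a media URL actually looks like, so `og:image` can always be rendered as an absolute URL. The helper name below is illustrative and assumes a configured Django settings module; the `"any-page"` probe value mirrors the patch.

```python
from django.core.files.storage import get_storage_class


def media_url_prefix(scheme: str, domain: str) -> str:
    # Ask the active storage backend (FileSystemStorage, an S3/CDN backend, ...)
    # for a sample URL; with django-storages this can differ from settings.MEDIA_URL.
    sample = get_storage_class()().url("any-page")
    if sample.startswith("//"):
        # Protocol-relative CDN URL, e.g. //cdn.example.com/media/... -> add the scheme only.
        return f"{scheme}:"
    if sample.startswith("/"):
        # Site-relative URL, e.g. /media/... -> add scheme and domain.
        return f"{scheme}://{domain}"
    # Already absolute, e.g. https://cdn.example.com/media/... -> nothing to prepend.
    return ""


# Usage sketch: prefix + image.url gives the absolute og:image value.
# media_url_prefix("https", "www.fun-mooc.fr") + "/media/filer_public/.../cover.jpg"
```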
|
{"golden_diff": "diff --git a/src/richie/apps/core/context_processors.py b/src/richie/apps/core/context_processors.py\n--- a/src/richie/apps/core/context_processors.py\n+++ b/src/richie/apps/core/context_processors.py\n@@ -6,6 +6,7 @@\n \n from django.conf import settings\n from django.contrib.sites.models import Site\n+from django.core.files.storage import get_storage_class\n from django.http.request import HttpRequest\n from django.middleware.csrf import get_token\n from django.utils.translation import get_language_from_request\n@@ -53,11 +54,12 @@\n if getattr(settings, \"CDN_DOMAIN\", None):\n context[\"CDN_DOMAIN\"] = settings.CDN_DOMAIN\n \n+ storage_url = get_storage_class()().url(\"any-page\")\n # Add a MEDIA_URL_PREFIX to context to prefix the media url files to have an absolute URL\n- if settings.MEDIA_URL.startswith(\"//\"):\n+ if storage_url.startswith(\"//\"):\n # Eg. //my-cdn-user.cdn-provider.com/media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{request.scheme:s}:\"\n- elif settings.MEDIA_URL.startswith(\"/\"):\n+ elif storage_url.startswith(\"/\"):\n # Eg. /media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{protocol:s}://{site_current.domain:s}\"\n else:\n", "issue": "og:image is not set propertly\n## Bug Report\r\n\r\n**Expected behavior/code**\r\nFor SEO purpose, we add open graph tags into CMS pages. These tags should be properly configured.\r\n\r\n**Actual Behavior**\r\nOn CMS Page, `og:image` url is malformed.\r\n\r\n**Steps to Reproduce**\r\n1. Go to a CMS Page\r\n2. Use [facebook debug tool](https://developers.facebook.com/tools/debug/) to check link preview for the CMS Page\r\n3. Note that `og:image` tag is not valid\r\n\r\n**Environment**\r\n- Richie version: 2.8.2\r\n- Platform: Mac OS 12.0.1 - Firefox 93.0\r\n\r\n**Possible Solution**\r\n- Fix the `og:image` attribute\r\n\r\n**Additional context/Screenshots**\r\n- https://developers.facebook.com/tools/debug/?q=https%3A%2F%2Fwww.fun-mooc.fr%2Ffr%2Fcours%2Fmooc-elles-font-lart%2F\r\n- https://developers.facebook.com/tools/debug/?q=https%3A%2F%2Fwww.fun-mooc.fr%2Ffr%2Factualites%2Fdeveloppement-des-territoires-comment-agir-pour-construire-laven%2F\r\n\n", "before_files": [{"content": "\"\"\"\nTemplate context processors\n\"\"\"\nimport json\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.http.request import HttpRequest\nfrom django.middleware.csrf import get_token\nfrom django.utils.translation import get_language_from_request\n\nfrom richie.apps.courses.defaults import RICHIE_MAX_ARCHIVED_COURSE_RUNS\nfrom richie.apps.courses.models import Organization\n\nfrom . import defaults\n\n\ndef site_metas(request: HttpRequest):\n \"\"\"\n Context processor to add all information required by Richie CMS templates and frontend.\n\n If `CDN_DOMAIN` settings is defined we add it in the context. 
It allows\n to load statics js on a CDN like cloudfront.\n \"\"\"\n site_current = Site.objects.get_current()\n protocol = \"https\" if request.is_secure() else \"http\"\n\n context = {\n **{\n f\"GLIMPSE_PAGINATION_{k.upper()}\": v\n for k, v in {\n **defaults.GLIMPSE_PAGINATION,\n **getattr(settings, \"RICHIE_GLIMPSE_PAGINATION\", {}),\n }.items()\n },\n \"SITE\": {\n \"name\": site_current.name,\n \"domain\": site_current.domain,\n \"web_url\": f\"{protocol:s}://{site_current.domain:s}\",\n },\n \"FRONTEND_CONTEXT\": {\n \"context\": {\n \"csrftoken\": get_token(request),\n \"environment\": getattr(settings, \"ENVIRONMENT\", \"\"),\n \"release\": getattr(settings, \"RELEASE\", \"\"),\n \"sentry_dsn\": getattr(settings, \"SENTRY_DSN\", \"\"),\n }\n },\n **WebAnalyticsContextProcessor().context_processor(request),\n }\n\n if getattr(settings, \"CDN_DOMAIN\", None):\n context[\"CDN_DOMAIN\"] = settings.CDN_DOMAIN\n\n # Add a MEDIA_URL_PREFIX to context to prefix the media url files to have an absolute URL\n if settings.MEDIA_URL.startswith(\"//\"):\n # Eg. //my-cdn-user.cdn-provider.com/media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{request.scheme:s}:\"\n elif settings.MEDIA_URL.startswith(\"/\"):\n # Eg. /media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{protocol:s}://{site_current.domain:s}\"\n else:\n # Eg. https://my-cdn-user.cdn-provider.com/media/\n context[\"MEDIA_URL_PREFIX\"] = \"\"\n\n authentication_delegation = getattr(\n settings, \"RICHIE_AUTHENTICATION_DELEGATION\", None\n )\n if authentication_delegation:\n\n context[\"AUTHENTICATION\"] = {\n \"profile_urls\": json.dumps(\n {\n key: {\n \"label\": str(url[\"label\"]),\n \"action\": str(\n url[\"href\"].format(\n base_url=authentication_delegation[\"BASE_URL\"]\n )\n ),\n }\n for key, url in authentication_delegation.get(\n \"PROFILE_URLS\", {}\n ).items()\n }\n ),\n }\n\n context[\"FRONTEND_CONTEXT\"][\"context\"][\"authentication\"] = {\n \"endpoint\": authentication_delegation[\"BASE_URL\"],\n \"backend\": authentication_delegation[\"BACKEND\"],\n }\n\n if getattr(settings, \"RICHIE_LMS_BACKENDS\", None):\n context[\"FRONTEND_CONTEXT\"][\"context\"][\"lms_backends\"] = [\n {\n \"endpoint\": lms[\"BASE_URL\"],\n \"backend\": lms[\"JS_BACKEND\"],\n \"course_regexp\": lms[\"JS_COURSE_REGEX\"],\n }\n for lms in getattr(settings, \"RICHIE_LMS_BACKENDS\", [])\n ]\n\n context[\"FRONTEND_CONTEXT\"] = json.dumps(context[\"FRONTEND_CONTEXT\"])\n\n if getattr(settings, \"RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\", None):\n context[\n \"RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\"\n ] = settings.RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\n\n context[\"RICHIE_MAX_ARCHIVED_COURSE_RUNS\"] = getattr(\n settings, \"RICHIE_MAX_ARCHIVED_COURSE_RUNS\", RICHIE_MAX_ARCHIVED_COURSE_RUNS\n )\n\n return context\n\n\nclass WebAnalyticsContextProcessor:\n \"\"\"\n Context processor to add Web Analytics tracking information to Richie CMS templates and\n frontend.\n \"\"\"\n\n def context_processor(self, request: HttpRequest) -> dict:\n \"\"\"\n Real implementation of the context processor for the Web Analytics core app sub-module\n \"\"\"\n context = {}\n if hasattr(request, \"current_page\"):\n # load web analytics settings to the context\n if getattr(settings, \"WEB_ANALYTICS_ID\", None):\n context[\"WEB_ANALYTICS_ID\"] = settings.WEB_ANALYTICS_ID\n context[\"WEB_ANALYTICS_DIMENSIONS\"] = self.get_dimensions(request)\n\n context[\"WEB_ANALYTICS_LOCATION\"] = getattr(\n settings, \"WEB_ANALYTICS_LOCATION\", \"head\"\n )\n\n 
context[\"WEB_ANALYTICS_PROVIDER\"] = getattr(\n settings, \"WEB_ANALYTICS_PROVIDER\", \"google_analytics\"\n )\n return context\n\n # pylint: disable=no-self-use\n def get_dimensions(self, request: HttpRequest) -> dict:\n \"\"\"\n Compute the web analytics dimensions (dict) that would be added to the Django context\n They are a dictionary like:\n ```\n {\n \"organizations_codes\": [\"UNIV_LISBON\", \"UNIV_PORTO\"],\n \"course_code\": [\"COURSE_XPTO\"],\n \"course_runs_titles\": [\n \"Summer edition\",\n \"Winter edition\"\n ],\n \"course_runs_resource_links\": [\n \"http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course/info\",\n \"http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course_2/info\"\n ],\n \"page_title\": [\"Introduction to Programming\"],\n }\n ```\n\n Args:\n request (HttpRequest): The Http request\n web_analytics_context (dict): the context relevant for the web analytics sub module\n\n Returns:\n dict: a dict with the dimensions, where each value is a list\n \"\"\"\n # Warn do not change the order of evaluation of this methods, because on Google Analytics\n # the key is a 'dimension1' instead of a more generic key like 'organizations'.\n # That's why we are using an OrderedDict instead of a normal Python dict (don't support\n # ordering)\n dimensions = OrderedDict()\n\n page = request.current_page or None\n language = get_language_from_request(request, check_path=True)\n\n organizations_codes = []\n if page and not page.is_home:\n organizations_codes = Organization.get_organizations_codes(page, language)\n dimensions[\"organizations_codes\"] = organizations_codes\n\n course = getattr(page, \"course\", None)\n dimensions[\"course_code\"] = [getattr(course, \"code\", \"\")]\n\n course_runs = course.course_runs if course else []\n dimensions[\"course_runs_titles\"] = [\n course_run.title\n for course_run in course_runs\n if course_run is not None and course_run.safe_title is not None\n ]\n\n dimensions[\"course_runs_resource_links\"] = map(\n lambda course_run: course_run.resource_link, course_runs\n )\n\n dimensions[\"page_title\"] = [page.get_title() if page else \"\"]\n return dimensions\n", "path": "src/richie/apps/core/context_processors.py"}], "after_files": [{"content": "\"\"\"\nTemplate context processors\n\"\"\"\nimport json\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.files.storage import get_storage_class\nfrom django.http.request import HttpRequest\nfrom django.middleware.csrf import get_token\nfrom django.utils.translation import get_language_from_request\n\nfrom richie.apps.courses.defaults import RICHIE_MAX_ARCHIVED_COURSE_RUNS\nfrom richie.apps.courses.models import Organization\n\nfrom . import defaults\n\n\ndef site_metas(request: HttpRequest):\n \"\"\"\n Context processor to add all information required by Richie CMS templates and frontend.\n\n If `CDN_DOMAIN` settings is defined we add it in the context. 
It allows\n to load statics js on a CDN like cloudfront.\n \"\"\"\n site_current = Site.objects.get_current()\n protocol = \"https\" if request.is_secure() else \"http\"\n\n context = {\n **{\n f\"GLIMPSE_PAGINATION_{k.upper()}\": v\n for k, v in {\n **defaults.GLIMPSE_PAGINATION,\n **getattr(settings, \"RICHIE_GLIMPSE_PAGINATION\", {}),\n }.items()\n },\n \"SITE\": {\n \"name\": site_current.name,\n \"domain\": site_current.domain,\n \"web_url\": f\"{protocol:s}://{site_current.domain:s}\",\n },\n \"FRONTEND_CONTEXT\": {\n \"context\": {\n \"csrftoken\": get_token(request),\n \"environment\": getattr(settings, \"ENVIRONMENT\", \"\"),\n \"release\": getattr(settings, \"RELEASE\", \"\"),\n \"sentry_dsn\": getattr(settings, \"SENTRY_DSN\", \"\"),\n }\n },\n **WebAnalyticsContextProcessor().context_processor(request),\n }\n\n if getattr(settings, \"CDN_DOMAIN\", None):\n context[\"CDN_DOMAIN\"] = settings.CDN_DOMAIN\n\n storage_url = get_storage_class()().url(\"any-page\")\n # Add a MEDIA_URL_PREFIX to context to prefix the media url files to have an absolute URL\n if storage_url.startswith(\"//\"):\n # Eg. //my-cdn-user.cdn-provider.com/media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{request.scheme:s}:\"\n elif storage_url.startswith(\"/\"):\n # Eg. /media/\n context[\"MEDIA_URL_PREFIX\"] = f\"{protocol:s}://{site_current.domain:s}\"\n else:\n # Eg. https://my-cdn-user.cdn-provider.com/media/\n context[\"MEDIA_URL_PREFIX\"] = \"\"\n\n authentication_delegation = getattr(\n settings, \"RICHIE_AUTHENTICATION_DELEGATION\", None\n )\n if authentication_delegation:\n\n context[\"AUTHENTICATION\"] = {\n \"profile_urls\": json.dumps(\n {\n key: {\n \"label\": str(url[\"label\"]),\n \"action\": str(\n url[\"href\"].format(\n base_url=authentication_delegation[\"BASE_URL\"]\n )\n ),\n }\n for key, url in authentication_delegation.get(\n \"PROFILE_URLS\", {}\n ).items()\n }\n ),\n }\n\n context[\"FRONTEND_CONTEXT\"][\"context\"][\"authentication\"] = {\n \"endpoint\": authentication_delegation[\"BASE_URL\"],\n \"backend\": authentication_delegation[\"BACKEND\"],\n }\n\n if getattr(settings, \"RICHIE_LMS_BACKENDS\", None):\n context[\"FRONTEND_CONTEXT\"][\"context\"][\"lms_backends\"] = [\n {\n \"endpoint\": lms[\"BASE_URL\"],\n \"backend\": lms[\"JS_BACKEND\"],\n \"course_regexp\": lms[\"JS_COURSE_REGEX\"],\n }\n for lms in getattr(settings, \"RICHIE_LMS_BACKENDS\", [])\n ]\n\n context[\"FRONTEND_CONTEXT\"] = json.dumps(context[\"FRONTEND_CONTEXT\"])\n\n if getattr(settings, \"RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\", None):\n context[\n \"RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\"\n ] = settings.RICHIE_MINIMUM_COURSE_RUNS_ENROLLMENT_COUNT\n\n context[\"RICHIE_MAX_ARCHIVED_COURSE_RUNS\"] = getattr(\n settings, \"RICHIE_MAX_ARCHIVED_COURSE_RUNS\", RICHIE_MAX_ARCHIVED_COURSE_RUNS\n )\n\n return context\n\n\nclass WebAnalyticsContextProcessor:\n \"\"\"\n Context processor to add Web Analytics tracking information to Richie CMS templates and\n frontend.\n \"\"\"\n\n def context_processor(self, request: HttpRequest) -> dict:\n \"\"\"\n Real implementation of the context processor for the Web Analytics core app sub-module\n \"\"\"\n context = {}\n if hasattr(request, \"current_page\"):\n # load web analytics settings to the context\n if getattr(settings, \"WEB_ANALYTICS_ID\", None):\n context[\"WEB_ANALYTICS_ID\"] = settings.WEB_ANALYTICS_ID\n context[\"WEB_ANALYTICS_DIMENSIONS\"] = self.get_dimensions(request)\n\n context[\"WEB_ANALYTICS_LOCATION\"] = getattr(\n settings, \"WEB_ANALYTICS_LOCATION\", 
\"head\"\n )\n\n context[\"WEB_ANALYTICS_PROVIDER\"] = getattr(\n settings, \"WEB_ANALYTICS_PROVIDER\", \"google_analytics\"\n )\n return context\n\n # pylint: disable=no-self-use\n def get_dimensions(self, request: HttpRequest) -> dict:\n \"\"\"\n Compute the web analytics dimensions (dict) that would be added to the Django context\n They are a dictionary like:\n ```\n {\n \"organizations_codes\": [\"UNIV_LISBON\", \"UNIV_PORTO\"],\n \"course_code\": [\"COURSE_XPTO\"],\n \"course_runs_titles\": [\n \"Summer edition\",\n \"Winter edition\"\n ],\n \"course_runs_resource_links\": [\n \"http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course/info\",\n \"http://example.edx:8073/courses/course-v1:edX+DemoX+Demo_Course_2/info\"\n ],\n \"page_title\": [\"Introduction to Programming\"],\n }\n ```\n\n Args:\n request (HttpRequest): The Http request\n web_analytics_context (dict): the context relevant for the web analytics sub module\n\n Returns:\n dict: a dict with the dimensions, where each value is a list\n \"\"\"\n # Warn do not change the order of evaluation of this methods, because on Google Analytics\n # the key is a 'dimension1' instead of a more generic key like 'organizations'.\n # That's why we are using an OrderedDict instead of a normal Python dict (don't support\n # ordering)\n dimensions = OrderedDict()\n\n page = request.current_page or None\n language = get_language_from_request(request, check_path=True)\n\n organizations_codes = []\n if page and not page.is_home:\n organizations_codes = Organization.get_organizations_codes(page, language)\n dimensions[\"organizations_codes\"] = organizations_codes\n\n course = getattr(page, \"course\", None)\n dimensions[\"course_code\"] = [getattr(course, \"code\", \"\")]\n\n course_runs = course.course_runs if course else []\n dimensions[\"course_runs_titles\"] = [\n course_run.title\n for course_run in course_runs\n if course_run is not None and course_run.safe_title is not None\n ]\n\n dimensions[\"course_runs_resource_links\"] = map(\n lambda course_run: course_run.resource_link, course_runs\n )\n\n dimensions[\"page_title\"] = [page.get_title() if page else \"\"]\n return dimensions\n", "path": "src/richie/apps/core/context_processors.py"}]}
| 2,622 | 278 |
gh_patches_debug_14075
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-769
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User creation not working
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
Due to a problem in the user form's logging, an error occurs when new users are created and a new role is assigned during the creation process.
### Steps to Reproduce
1. Go to the user management
2. Create new user and select at least one role
3. See error
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
The user including a corresponding user profile should be created
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
An error occurs and the user is created without a user profile
```
RelatedObjectDoesNotExist at /augsburg/users/new
User has no profile.
```
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
<details>
Environment:
Request Method: POST
Request URL: http://localhost:8000/augsburg/users/new
Django Version: 3.1.8
Python Version: 3.7.10
Installed Applications:
['cms.apps.CmsConfig',
'gvz_api.apps.GvzApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'corsheaders',
'widget_tweaks',
'easy_thumbnails',
'filer',
'mptt',
'rules.apps.AutodiscoverRulesConfig']
Installed Middleware:
['corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.timezone_middleware.TimezoneMiddleware']
Traceback (most recent call last):
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/views/generic/base.py", line 70, in view
return self.dispatch(request, *args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/utils/decorators.py", line 43, in _wrapper
return bound_method(*args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/utils/decorators.py", line 43, in _wrapper
return bound_method(*args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/src/cms/decorators.py", line 53, in wrap
return function(request, *args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/contrib/auth/mixins.py", line 85, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/views/generic/base.py", line 98, in dispatch
return handler(request, *args, **kwargs)
File "/home/timo/job/integreat/integreat-cms/src/cms/views/users/region_user_view.py", line 100, in post
user = region_user_form.save()
File "/home/timo/job/integreat/integreat-cms/src/cms/forms/users/user_form.py", line 96, in save
logger.info("%r was assigned to %r", role, user.profile)
File "/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/db/models/fields/related_descriptors.py", line 424, in __get__
self.related.get_accessor_name()
Exception Type: RelatedObjectDoesNotExist at /augsburg/users/new
Exception Value: User has no profile.
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/forms/users/user_form.py`
Content:
```
1 import logging
2
3 from django import forms
4 from django.contrib.auth import get_user_model
5 from django.contrib.auth.models import Group as Role
6 from django.contrib.auth.password_validation import (
7 validate_password,
8 password_validators_help_texts,
9 )
10 from django.utils.translation import ugettext_lazy as _
11
12
13 from ..custom_model_form import CustomModelForm
14
15 logger = logging.getLogger(__name__)
16
17
18 class UserForm(CustomModelForm):
19 """
20 Form for creating and modifying user objects
21 """
22
23 roles = forms.ModelMultipleChoiceField(queryset=Role.objects.all(), required=False)
24 password = forms.CharField(
25 widget=forms.PasswordInput,
26 validators=[validate_password],
27 help_text=password_validators_help_texts,
28 )
29
30 class Meta:
31 """
32 This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`
33 for more information.
34 """
35
36 #: The model of this :class:`django.forms.ModelForm`
37 model = get_user_model()
38 #: The fields of the model which should be handled by this form
39 fields = [
40 "username",
41 "first_name",
42 "last_name",
43 "email",
44 "is_staff",
45 "is_active",
46 "is_superuser",
47 ]
48
49 def __init__(self, data=None, instance=None):
50
51 # instantiate ModelForm
52 super().__init__(data=data, instance=instance)
53
54 # check if user instance already exists
55 if self.instance.id:
56 # set initial role data
57 self.fields["roles"].initial = self.instance.groups.all()
58 # don't require password if user already exists
59 self.fields["password"].required = False
60 # adapt placeholder of password input field
61 self.fields["password"].widget.attrs.update(
62 {"placeholder": _("Leave empty to keep unchanged")}
63 )
64 # fix password label
65 self.fields["password"].label = _("Password")
66
67 # pylint: disable=signature-differs
68 def save(self, *args, **kwargs):
69 """
70 This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes
71 which are not directly determined by input fields.
72
73 :param args: The supplied arguments
74 :type args: list
75
76 :param kwargs: The supplied keyword arguments
77 :type kwargs: dict
78
79 :return: The saved user object
80 :rtype: ~django.contrib.auth.models.User
81 """
82
83 # save ModelForm
84 user = super().save(*args, **kwargs)
85
86 # check if password field was changed
87 if self.cleaned_data["password"]:
88 # change password
89 user.set_password(self.cleaned_data["password"])
90 user.save()
91
92 # assign all selected roles which the user does not have already
93 for role in set(self.cleaned_data["roles"]) - set(user.groups.all()):
94 role.user_set.add(user)
95 logger.info("%r was assigned to %r", role, user.profile)
96
97 # remove all unselected roles which the user had before
98 for role in set(user.groups.all()) - set(self.cleaned_data["roles"]):
99 role.user_set.remove(user)
100 logger.info("The role %r was removed from %r", role, user.profile)
101
102 return user
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cms/forms/users/user_form.py b/src/cms/forms/users/user_form.py
--- a/src/cms/forms/users/user_form.py
+++ b/src/cms/forms/users/user_form.py
@@ -92,7 +92,10 @@
# assign all selected roles which the user does not have already
for role in set(self.cleaned_data["roles"]) - set(user.groups.all()):
role.user_set.add(user)
- logger.info("%r was assigned to %r", role, user.profile)
+ if hasattr(user, "profile"):
+ logger.info("%r was assigned to %r", role, user.profile)
+ else:
+ logger.info("%r was assigned to %r", role, user)
# remove all unselected roles which the user had before
for role in set(user.groups.all()) - set(self.cleaned_data["roles"]):
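A short illustration of why the `hasattr` guard above works (the function and names here are illustrative, not the project's): Django's reverse one-to-one accessor raises `RelatedObjectDoesNotExist` when no related row exists yet, and that exception also subclasses `AttributeError`, which is exactly what `hasattr()` checks for.

```python
def user_label(user):
    # Right after User creation there may be no Profile row yet. Accessing
    # user.profile then raises User.profile.RelatedObjectDoesNotExist, which
    # subclasses AttributeError, so hasattr() returns False instead of letting
    # the exception escape into the logging call.
    if hasattr(user, "profile"):
        return f"{user.username} ({user.profile})"
    return user.username
```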
|
{"golden_diff": "diff --git a/src/cms/forms/users/user_form.py b/src/cms/forms/users/user_form.py\n--- a/src/cms/forms/users/user_form.py\n+++ b/src/cms/forms/users/user_form.py\n@@ -92,7 +92,10 @@\n # assign all selected roles which the user does not have already\n for role in set(self.cleaned_data[\"roles\"]) - set(user.groups.all()):\n role.user_set.add(user)\n- logger.info(\"%r was assigned to %r\", role, user.profile)\n+ if hasattr(user, \"profile\"):\n+ logger.info(\"%r was assigned to %r\", role, user.profile)\n+ else:\n+ logger.info(\"%r was assigned to %r\", role, user)\n \n # remove all unselected roles which the user had before\n for role in set(user.groups.all()) - set(self.cleaned_data[\"roles\"]):\n", "issue": "User creation not working\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nDue to a problem in the user form's logging, an error occurs when new users are created and a new role is assigned in the creation process\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to the user management\r\n2. Create new user and select at least one role\r\n4. See error\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe user including a corresponding user profile should be created\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nAn error occurs and the user is created without a user profile\r\n```\r\nRelatedObjectDoesNotExist at /augsburg/users/new\r\n\r\nUser has no profile.\r\n```\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n<details>\r\n\r\nEnvironment:\r\n\r\n\r\nRequest Method: POST\r\nRequest URL: http://localhost:8000/augsburg/users/new\r\n\r\nDjango Version: 3.1.8\r\nPython Version: 3.7.10\r\nInstalled Applications:\r\n['cms.apps.CmsConfig',\r\n 'gvz_api.apps.GvzApiConfig',\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.messages',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sitemaps',\r\n 'django.contrib.staticfiles',\r\n 'corsheaders',\r\n 'widget_tweaks',\r\n 'easy_thumbnails',\r\n 'filer',\r\n 'mptt',\r\n 'rules.apps.AutodiscoverRulesConfig']\r\nInstalled Middleware:\r\n['corsheaders.middleware.CorsMiddleware',\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.locale.LocaleMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'cms.middleware.timezone_middleware.TimezoneMiddleware']\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/views/generic/base.py\", line 70, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File 
\"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/utils/decorators.py\", line 43, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/utils/decorators.py\", line 43, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/src/cms/decorators.py\", line 53, in wrap\r\n return function(request, *args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/contrib/auth/mixins.py\", line 85, in dispatch\r\n return super().dispatch(request, *args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/views/generic/base.py\", line 98, in dispatch\r\n return handler(request, *args, **kwargs)\r\n File \"/home/timo/job/integreat/integreat-cms/src/cms/views/users/region_user_view.py\", line 100, in post\r\n user = region_user_form.save()\r\n File \"/home/timo/job/integreat/integreat-cms/src/cms/forms/users/user_form.py\", line 96, in save\r\n logger.info(\"%r was assigned to %r\", role, user.profile)\r\n File \"/home/timo/job/integreat/integreat-cms/.venv/lib/python3.7/site-packages/django/db/models/fields/related_descriptors.py\", line 424, in __get__\r\n self.related.get_accessor_name()\r\n\r\nException Type: RelatedObjectDoesNotExist at /augsburg/users/new\r\nException Value: User has no profile.\r\n\r\n</details>\n", "before_files": [{"content": "import logging\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group as Role\nfrom django.contrib.auth.password_validation import (\n validate_password,\n password_validators_help_texts,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\n\nfrom ..custom_model_form import CustomModelForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserForm(CustomModelForm):\n \"\"\"\n Form for creating and modifying user objects\n \"\"\"\n\n roles = forms.ModelMultipleChoiceField(queryset=Role.objects.all(), required=False)\n password = forms.CharField(\n widget=forms.PasswordInput,\n validators=[validate_password],\n help_text=password_validators_help_texts,\n )\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = get_user_model()\n #: The fields of the model which should be handled by this form\n fields = [\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n ]\n\n def __init__(self, data=None, instance=None):\n\n # instantiate ModelForm\n super().__init__(data=data, instance=instance)\n\n # check if user instance already exists\n if self.instance.id:\n # set initial role data\n self.fields[\"roles\"].initial = self.instance.groups.all()\n # don't require password if user already exists\n self.fields[\"password\"].required = False\n # adapt placeholder of password input field\n self.fields[\"password\"].widget.attrs.update(\n {\"placeholder\": _(\"Leave empty to keep unchanged\")}\n )\n # fix password label\n self.fields[\"password\"].label = _(\"Password\")\n\n # pylint: 
disable=signature-differs\n def save(self, *args, **kwargs):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes\n which are not directly determined by input fields.\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The saved user object\n :rtype: ~django.contrib.auth.models.User\n \"\"\"\n\n # save ModelForm\n user = super().save(*args, **kwargs)\n\n # check if password field was changed\n if self.cleaned_data[\"password\"]:\n # change password\n user.set_password(self.cleaned_data[\"password\"])\n user.save()\n\n # assign all selected roles which the user does not have already\n for role in set(self.cleaned_data[\"roles\"]) - set(user.groups.all()):\n role.user_set.add(user)\n logger.info(\"%r was assigned to %r\", role, user.profile)\n\n # remove all unselected roles which the user had before\n for role in set(user.groups.all()) - set(self.cleaned_data[\"roles\"]):\n role.user_set.remove(user)\n logger.info(\"The role %r was removed from %r\", role, user.profile)\n\n return user\n", "path": "src/cms/forms/users/user_form.py"}], "after_files": [{"content": "import logging\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group as Role\nfrom django.contrib.auth.password_validation import (\n validate_password,\n password_validators_help_texts,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\n\nfrom ..custom_model_form import CustomModelForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserForm(CustomModelForm):\n \"\"\"\n Form for creating and modifying user objects\n \"\"\"\n\n roles = forms.ModelMultipleChoiceField(queryset=Role.objects.all(), required=False)\n password = forms.CharField(\n widget=forms.PasswordInput,\n validators=[validate_password],\n help_text=password_validators_help_texts,\n )\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = get_user_model()\n #: The fields of the model which should be handled by this form\n fields = [\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n ]\n\n def __init__(self, data=None, instance=None):\n\n # instantiate ModelForm\n super().__init__(data=data, instance=instance)\n\n # check if user instance already exists\n if self.instance.id:\n # set initial role data\n self.fields[\"roles\"].initial = self.instance.groups.all()\n # don't require password if user already exists\n self.fields[\"password\"].required = False\n # adapt placeholder of password input field\n self.fields[\"password\"].widget.attrs.update(\n {\"placeholder\": _(\"Leave empty to keep unchanged\")}\n )\n # fix password label\n self.fields[\"password\"].label = _(\"Password\")\n\n # pylint: disable=signature-differs\n def save(self, *args, **kwargs):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes\n which are not directly determined by input fields.\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The saved user object\n :rtype: ~django.contrib.auth.models.User\n \"\"\"\n\n # save ModelForm\n user = 
super().save(*args, **kwargs)\n\n # check if password field was changed\n if self.cleaned_data[\"password\"]:\n # change password\n user.set_password(self.cleaned_data[\"password\"])\n user.save()\n\n # assign all selected roles which the user does not have already\n for role in set(self.cleaned_data[\"roles\"]) - set(user.groups.all()):\n role.user_set.add(user)\n if hasattr(user, \"profile\"):\n logger.info(\"%r was assigned to %r\", role, user.profile)\n else:\n logger.info(\"%r was assigned to %r\", role, user)\n\n # remove all unselected roles which the user had before\n for role in set(user.groups.all()) - set(self.cleaned_data[\"roles\"]):\n role.user_set.remove(user)\n logger.info(\"The role %r was removed from %r\", role, user.profile)\n\n return user\n", "path": "src/cms/forms/users/user_form.py"}]}
| 2,223 | 186 |
gh_patches_debug_15848
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: date time issue
### I Have A Problem With:
The integration in general
### What's Your Problem
It used to work, with no change in the configuration.
At some point, after an upgrade, the integration began failing while starting.
See the log below.
### Source (if relevant)
_No response_
### Logs
```Shell
Denne feilen stammer fra en tilpasset integrasjon.
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
Integration: waste_collection_schedule (documentation)
First occurred: 19:02:11 (1 occurrences)
Last logged: 19:02:11
fetch failed for source Stavanger Kommune: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py", line 63, in fetch date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/_strptime.py", line 568, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/_strptime.py", line 534, in _strptime julian = datetime_date(year, month, day).toordinal() - \ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ValueError: day is out of range for month
```
### Relevant Configuration
```YAML
name: stavanger_no
municipality: Stavanger
```
### Checklist Source Error
- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py`
Content:
```
1 from datetime import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Stavanger Kommune"
8 DESCRIPTION = "Source for Stavanger Kommune, Norway"
9 URL = "https://www.stavanger.kommune.no/"
10 TEST_CASES = {
11 "TestcaseI": {
12 "id": "57bf9d36-722e-400b-ae93-d80f8e354724",
13 "municipality": "Stavanger",
14 "gnumber": "57",
15 "bnumber": "922",
16 "snumber": "0",
17 },
18 }
19
20 ICON_MAP = {
21 "Restavfall": "mdi:trash-can",
22 "Papp/papir": "mdi:recycle",
23 "Bio": "mdi:leaf",
24 "Juletre": "mdi:pine-tree",
25 }
26
27
28 class Source:
29 def __init__(self, id, municipality, gnumber, bnumber, snumber):
30 self._id = id
31 self._municipality = municipality
32 self._gnumber = gnumber
33 self._bnumber = bnumber
34 self._snumber = snumber
35
36 def fetch(self):
37 url = "https://www.stavanger.kommune.no/renovasjon-og-miljo/tommekalender/finn-kalender/show"
38 headers = {"referer": "https://www.stavanger.kommune.no"}
39
40 params = {
41 "id": self._id,
42 "municipality": self._municipality,
43 "gnumber": self._gnumber,
44 "bnumber": self._bnumber,
45 "snumber": self._snumber,
46 }
47
48 r = requests.get(url, params=params, headers=headers)
49 r.raise_for_status()
50
51 soup = BeautifulSoup(r.text, "html.parser")
52
53 tag = soup.find_all("option")
54 year = tag[0].get("value").split("-")
55 year = year[1]
56
57 entries = []
58 for tag in soup.find_all("tr", {"class": "waste-calendar__item"}):
59 if tag.text.strip() == "Dato og dag\nAvfallstype":
60 continue
61
62 date = tag.text.strip().split(" - ")
63 date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date()
64
65 for img in tag.find_all("img"):
66 waste_type = img.get("title")
67 entries.append(
68 Collection(date, waste_type, icon=ICON_MAP.get(waste_type))
69 )
70
71 return entries
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
@@ -51,14 +51,12 @@
soup = BeautifulSoup(r.text, "html.parser")
tag = soup.find_all("option")
- year = tag[0].get("value").split("-")
- year = year[1]
-
entries = []
for tag in soup.find_all("tr", {"class": "waste-calendar__item"}):
if tag.text.strip() == "Dato og dag\nAvfallstype":
continue
+ year = tag.parent.attrs["data-month"].split("-")[1]
date = tag.text.strip().split(" - ")
date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date()
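To see why the per-row year matters, a minimal self-contained sketch (the HTML snippet and its `data-month="MM-YYYY"` format are assumptions for illustration): a date such as `29.02` only parses against the leap year of its own month block, so reading the year from each row's parent instead of one global `<option>` value avoids the `ValueError: day is out of range for month` from the traceback.

```python
from datetime import datetime

from bs4 import BeautifulSoup

html = """
<table>
  <tbody data-month="02-2024">
    <tr class="waste-calendar__item"><td>29.02 - torsdag</td></tr>
  </tbody>
</table>
"""

soup = BeautifulSoup(html, "html.parser")
row = soup.find("tr", {"class": "waste-calendar__item"})

year = row.parent.attrs["data-month"].split("-")[1]  # "2024", taken per row
day_month = row.text.strip().split(" - ")[0]          # "29.02"
print(datetime.strptime(f"{day_month}.{year}", "%d.%m.%Y").date())
# 2024-02-29 -- a single global year (e.g. a non-leap 2023) applied to this row
# would raise "day is out of range for month", as in the reported traceback.
```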
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n@@ -51,14 +51,12 @@\n soup = BeautifulSoup(r.text, \"html.parser\")\n \n tag = soup.find_all(\"option\")\n- year = tag[0].get(\"value\").split(\"-\")\n- year = year[1]\n-\n entries = []\n for tag in soup.find_all(\"tr\", {\"class\": \"waste-calendar__item\"}):\n if tag.text.strip() == \"Dato og dag\\nAvfallstype\":\n continue\n \n+ year = tag.parent.attrs[\"data-month\"].split(\"-\")[1]\n date = tag.text.strip().split(\" - \")\n date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date()\n", "issue": "[Bug]: date time issue\n### I Have A Problem With:\n\nThe integration in general\n\n### What's Your Problem\n\nUsed to work. No change in cfg.\r\nAt some point, after an upgrade\u2026\r\nIntegration fails while starting. See log.\n\n### Source (if relevant)\n\n_No response_\n\n### Logs\n\n```Shell\nDenne feilen stammer fra en tilpasset integrasjon.\r\n\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 19:02:11 (1 occurrences)\r\nLast logged: 19:02:11\r\n\r\nfetch failed for source Stavanger Kommune: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\", line 63, in fetch date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.11/_strptime.py\", line 568, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.11/_strptime.py\", line 534, in _strptime julian = datetime_date(year, month, day).toordinal() - \\ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ValueError: day is out of range for month\n```\n\n\n### Relevant Configuration\n\n```YAML\nname: stavanger_no\r\n\r\nmunicipality: Stavanger\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": 
[{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Stavanger Kommune\"\nDESCRIPTION = \"Source for Stavanger Kommune, Norway\"\nURL = \"https://www.stavanger.kommune.no/\"\nTEST_CASES = {\n \"TestcaseI\": {\n \"id\": \"57bf9d36-722e-400b-ae93-d80f8e354724\",\n \"municipality\": \"Stavanger\",\n \"gnumber\": \"57\",\n \"bnumber\": \"922\",\n \"snumber\": \"0\",\n },\n}\n\nICON_MAP = {\n \"Restavfall\": \"mdi:trash-can\",\n \"Papp/papir\": \"mdi:recycle\",\n \"Bio\": \"mdi:leaf\",\n \"Juletre\": \"mdi:pine-tree\",\n}\n\n\nclass Source:\n def __init__(self, id, municipality, gnumber, bnumber, snumber):\n self._id = id\n self._municipality = municipality\n self._gnumber = gnumber\n self._bnumber = bnumber\n self._snumber = snumber\n\n def fetch(self):\n url = \"https://www.stavanger.kommune.no/renovasjon-og-miljo/tommekalender/finn-kalender/show\"\n headers = {\"referer\": \"https://www.stavanger.kommune.no\"}\n\n params = {\n \"id\": self._id,\n \"municipality\": self._municipality,\n \"gnumber\": self._gnumber,\n \"bnumber\": self._bnumber,\n \"snumber\": self._snumber,\n }\n\n r = requests.get(url, params=params, headers=headers)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n tag = soup.find_all(\"option\")\n year = tag[0].get(\"value\").split(\"-\")\n year = year[1]\n\n entries = []\n for tag in soup.find_all(\"tr\", {\"class\": \"waste-calendar__item\"}):\n if tag.text.strip() == \"Dato og dag\\nAvfallstype\":\n continue\n\n date = tag.text.strip().split(\" - \")\n date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date()\n\n for img in tag.find_all(\"img\"):\n waste_type = img.get(\"title\")\n entries.append(\n Collection(date, waste_type, icon=ICON_MAP.get(waste_type))\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Stavanger Kommune\"\nDESCRIPTION = \"Source for Stavanger Kommune, Norway\"\nURL = \"https://www.stavanger.kommune.no/\"\nTEST_CASES = {\n \"TestcaseI\": {\n \"id\": \"57bf9d36-722e-400b-ae93-d80f8e354724\",\n \"municipality\": \"Stavanger\",\n \"gnumber\": \"57\",\n \"bnumber\": \"922\",\n \"snumber\": \"0\",\n },\n}\n\nICON_MAP = {\n \"Restavfall\": \"mdi:trash-can\",\n \"Papp/papir\": \"mdi:recycle\",\n \"Bio\": \"mdi:leaf\",\n \"Juletre\": \"mdi:pine-tree\",\n}\n\n\nclass Source:\n def __init__(self, id, municipality, gnumber, bnumber, snumber):\n self._id = id\n self._municipality = municipality\n self._gnumber = gnumber\n self._bnumber = bnumber\n self._snumber = snumber\n\n def fetch(self):\n url = \"https://www.stavanger.kommune.no/renovasjon-og-miljo/tommekalender/finn-kalender/show\"\n headers = {\"referer\": \"https://www.stavanger.kommune.no\"}\n\n params = {\n \"id\": self._id,\n \"municipality\": self._municipality,\n \"gnumber\": self._gnumber,\n \"bnumber\": self._bnumber,\n \"snumber\": self._snumber,\n }\n\n r = requests.get(url, params=params, headers=headers)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n tag = soup.find_all(\"option\")\n entries = []\n for tag in soup.find_all(\"tr\", {\"class\": \"waste-calendar__item\"}):\n if tag.text.strip() == \"Dato og 
dag\\nAvfallstype\":\n continue\n\n year = tag.parent.attrs[\"data-month\"].split(\"-\")[1]\n date = tag.text.strip().split(\" - \")\n date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date()\n\n for img in tag.find_all(\"img\"):\n waste_type = img.get(\"title\")\n entries.append(\n Collection(date, waste_type, icon=ICON_MAP.get(waste_type))\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py"}]}
| 1,601 | 230 |
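The fix in the entry above moves the year lookup from a page-wide `<option>` tag to each calendar row's own `data-month` attribute. A minimal sketch of why the original approach can blow up, using hypothetical values (a 29 February collection date paired with a non-leap year pulled from elsewhere on the page); only the standard library is needed:

```python
from datetime import datetime

# Year taken from an unrelated <option> tag elsewhere on the page (non-leap year)
wrong_year = "2023"
# Year taken from the row's own data-month attribute (leap year)
row_year = "2024"

row_date = "29.02"  # a collection date that only exists in leap years

# Parsing with the page-wide year raises the error seen in the traceback
try:
    datetime.strptime(row_date + "." + wrong_year, "%d.%m.%Y")
except ValueError as err:
    print(err)  # day is out of range for month

# Parsing with the per-row year succeeds
print(datetime.strptime(row_date + "." + row_year, "%d.%m.%Y").date())  # 2024-02-29
```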
gh_patches_debug_30563
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1176
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add axis labels to pyhf.contrib.viz.brazil.plot_results
# Description
We know that the axis labels for [`pyhf.contrib.viz.brazil.plot_results`](https://github.com/scikit-hep/pyhf/blob/28fdfe95a3a4846ba70a9a338b3f72a94eac1322/src/pyhf/contrib/viz/brazil.py#L5) are always going to be the same, so we should just add them on there as
```python
ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\mathrm{CL}_{s}$")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/contrib/viz/brazil.py`
Content:
```
1 """Brazil Band Plots."""
2 import numpy as np
3
4
5 def plot_results(ax, mutests, tests, test_size=0.05):
6 """Plot a series of hypothesis tests for various POI values."""
7 cls_obs = np.array([test[0] for test in tests]).flatten()
8 cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
9 ax.plot(mutests, cls_obs, c='black')
10 for idx, color in zip(range(5), 5 * ['black']):
11 ax.plot(
12 mutests, cls_exp[idx], c=color, linestyle='dotted' if idx != 2 else 'dashed'
13 )
14 ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')
15 ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')
16 ax.plot(mutests, [test_size] * len(mutests), c='red')
17 ax.set_ylim(0, 1)
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/contrib/viz/brazil.py b/src/pyhf/contrib/viz/brazil.py
--- a/src/pyhf/contrib/viz/brazil.py
+++ b/src/pyhf/contrib/viz/brazil.py
@@ -3,7 +3,37 @@
def plot_results(ax, mutests, tests, test_size=0.05):
- """Plot a series of hypothesis tests for various POI values."""
+ """
+ Plot a series of hypothesis tests for various POI values.
+
+ Example:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> import pyhf
+ >>> import pyhf.contrib.viz.brazil
+ >>> pyhf.set_backend("numpy")
+ >>> model = pyhf.simplemodels.hepdata_like(
+ ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
+ ... )
+ >>> observations = [51, 48]
+ >>> data = observations + model.config.auxdata
+ >>> poi_vals = np.linspace(0, 5, 41)
+ >>> results = [
+ ... pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)
+ ... for test_poi in poi_vals
+ ... ]
+ >>> fig, ax = plt.subplots()
+ >>> pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)
+
+ Args:
+ ax (`matplotlib.axes.Axes`): The matplotlib axis object to plot on.
+ mutests (:obj:`list` or :obj:`array`): The values of the POI where the
+ hypothesis tests were performed.
+ tests (:obj:`list` or :obj:`array`): The :math:$\\mathrm{CL}_{s}$ values
+ from the hypothesis tests.
+ test_size (:obj:`float`): The size, :math:$\alpha$, of the test.
+ """
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='black')
@@ -15,3 +45,6 @@
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')
ax.plot(mutests, [test_size] * len(mutests), c='red')
ax.set_ylim(0, 1)
+
+ ax.set_xlabel(r"$\mu$ (POI)")
+ ax.set_ylabel(r"$\mathrm{CL}_{s}$")
|
{"golden_diff": "diff --git a/src/pyhf/contrib/viz/brazil.py b/src/pyhf/contrib/viz/brazil.py\n--- a/src/pyhf/contrib/viz/brazil.py\n+++ b/src/pyhf/contrib/viz/brazil.py\n@@ -3,7 +3,37 @@\n \n \n def plot_results(ax, mutests, tests, test_size=0.05):\n- \"\"\"Plot a series of hypothesis tests for various POI values.\"\"\"\n+ \"\"\"\n+ Plot a series of hypothesis tests for various POI values.\n+\n+ Example:\n+\n+ >>> import numpy as np\n+ >>> import matplotlib.pyplot as plt\n+ >>> import pyhf\n+ >>> import pyhf.contrib.viz.brazil\n+ >>> pyhf.set_backend(\"numpy\")\n+ >>> model = pyhf.simplemodels.hepdata_like(\n+ ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n+ ... )\n+ >>> observations = [51, 48]\n+ >>> data = observations + model.config.auxdata\n+ >>> poi_vals = np.linspace(0, 5, 41)\n+ >>> results = [\n+ ... pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)\n+ ... for test_poi in poi_vals\n+ ... ]\n+ >>> fig, ax = plt.subplots()\n+ >>> pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)\n+\n+ Args:\n+ ax (`matplotlib.axes.Axes`): The matplotlib axis object to plot on.\n+ mutests (:obj:`list` or :obj:`array`): The values of the POI where the\n+ hypothesis tests were performed.\n+ tests (:obj:`list` or :obj:`array`): The :math:$\\\\mathrm{CL}_{s}$ values\n+ from the hypothesis tests.\n+ test_size (:obj:`float`): The size, :math:$\\alpha$, of the test.\n+ \"\"\"\n cls_obs = np.array([test[0] for test in tests]).flatten()\n cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]\n ax.plot(mutests, cls_obs, c='black')\n@@ -15,3 +45,6 @@\n ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')\n ax.plot(mutests, [test_size] * len(mutests), c='red')\n ax.set_ylim(0, 1)\n+\n+ ax.set_xlabel(r\"$\\mu$ (POI)\")\n+ ax.set_ylabel(r\"$\\mathrm{CL}_{s}$\")\n", "issue": "Add axis labels to pyhf.contrib.viz.brazil.plot_results\n# Description\r\n\r\nWe know that the axis labels for [`pyhf.contrib.viz.brazil.plot_results`](https://github.com/scikit-hep/pyhf/blob/28fdfe95a3a4846ba70a9a338b3f72a94eac1322/src/pyhf/contrib/viz/brazil.py#L5) are always going to be the same, so we should just add them on there as\r\n\r\n```python\r\nax.set_xlabel(r\"$\\mu$\")\r\nax.set_ylabel(r\"$\\mathrm{CL}_{s}$\")\r\n```\n", "before_files": [{"content": "\"\"\"Brazil Band Plots.\"\"\"\nimport numpy as np\n\n\ndef plot_results(ax, mutests, tests, test_size=0.05):\n \"\"\"Plot a series of hypothesis tests for various POI values.\"\"\"\n cls_obs = np.array([test[0] for test in tests]).flatten()\n cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]\n ax.plot(mutests, cls_obs, c='black')\n for idx, color in zip(range(5), 5 * ['black']):\n ax.plot(\n mutests, cls_exp[idx], c=color, linestyle='dotted' if idx != 2 else 'dashed'\n )\n ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')\n ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')\n ax.plot(mutests, [test_size] * len(mutests), c='red')\n ax.set_ylim(0, 1)\n", "path": "src/pyhf/contrib/viz/brazil.py"}], "after_files": [{"content": "\"\"\"Brazil Band Plots.\"\"\"\nimport numpy as np\n\n\ndef plot_results(ax, mutests, tests, test_size=0.05):\n \"\"\"\n Plot a series of hypothesis tests for various POI values.\n\n Example:\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> import pyhf\n >>> import pyhf.contrib.viz.brazil\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... 
signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = observations + model.config.auxdata\n >>> poi_vals = np.linspace(0, 5, 41)\n >>> results = [\n ... pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)\n ... for test_poi in poi_vals\n ... ]\n >>> fig, ax = plt.subplots()\n >>> pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)\n\n Args:\n ax (`matplotlib.axes.Axes`): The matplotlib axis object to plot on.\n mutests (:obj:`list` or :obj:`array`): The values of the POI where the\n hypothesis tests were performed.\n tests (:obj:`list` or :obj:`array`): The :math:$\\\\mathrm{CL}_{s}$ values\n from the hypothesis tests.\n test_size (:obj:`float`): The size, :math:$\\alpha$, of the test.\n \"\"\"\n cls_obs = np.array([test[0] for test in tests]).flatten()\n cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]\n ax.plot(mutests, cls_obs, c='black')\n for idx, color in zip(range(5), 5 * ['black']):\n ax.plot(\n mutests, cls_exp[idx], c=color, linestyle='dotted' if idx != 2 else 'dashed'\n )\n ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')\n ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')\n ax.plot(mutests, [test_size] * len(mutests), c='red')\n ax.set_ylim(0, 1)\n\n ax.set_xlabel(r\"$\\mu$ (POI)\")\n ax.set_ylabel(r\"$\\mathrm{CL}_{s}$\")\n", "path": "src/pyhf/contrib/viz/brazil.py"}]}
| 659 | 618 |
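The substance of the change above is two Matplotlib calls at the end of the plotting helper. A standalone sketch of the same idea on a bare `Axes` object, without pyhf; the label strings follow the issue text, and the plotted numbers are placeholders:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [1.0, 0.6, 0.2, 0.05], c="black")  # placeholder CLs curve
ax.set_ylim(0, 1)

# The two axis labels requested in the issue
ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\mathrm{CL}_{s}$")

fig.savefig("brazil_band_labels.png")
```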
gh_patches_debug_64571
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1145
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Packaging: Add python_requires to manifest
Define our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 from setuptools import setup
32 from setuptools import find_packages
33 from os import path, walk
34
35 def read_file(fname):
36 return open(path.join(path.dirname(__file__), fname)).read()
37
38 def package_files(directory):
39 paths = []
40 for (fpath, directories, filenames) in walk(directory):
41 for filename in filenames:
42 paths.append(path.join('..', fpath, filename))
43 return paths
44
45 version = read_file('version')[8:].strip()
46
47 setup(
48 name='cocotb',
49 version=version,
50 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
51 url='https://github.com/potentialventures/cocotb',
52 license='BSD',
53 long_description=read_file('README.md'),
54 long_description_content_type='text/markdown',
55 author='Chris Higgs, Stuart Hodgson',
56 author_email='[email protected]',
57 install_requires=[],
58 packages=find_packages(),
59 include_package_data=True,
60 package_data={'cocotb': package_files('cocotb/share')},
61 entry_points={
62 'console_scripts': [
63 'cocotb-config=cocotb.config:main',
64 ]
65 },
66 platforms='any',
67 classifiers=[
68 "Programming Language :: Python :: 2.7",
69 "Programming Language :: Python :: 3",
70 "License :: OSI Approved :: BSD License",
71 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
72 ],
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -55,6 +55,7 @@
author='Chris Higgs, Stuart Hodgson',
author_email='[email protected]',
install_requires=[],
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
packages=find_packages(),
include_package_data=True,
package_data={'cocotb': package_files('cocotb/share')},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n", "issue": "Packaging: Add python_requires to manifest\nDefine our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\n\ndef read_file(fname):\n return open(path.join(path.dirname(__file__), fname)).read()\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\nversion = read_file('version')[8:].strip()\n\nsetup(\n name='cocotb',\n version=version,\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://github.com/potentialventures/cocotb',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\n\ndef read_file(fname):\n return open(path.join(path.dirname(__file__), fname)).read()\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\nversion = read_file('version')[8:].strip()\n\nsetup(\n name='cocotb',\n version=version,\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://github.com/potentialventures/cocotb',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n)\n", "path": "setup.py"}]}
| 1,111 | 145 |
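The one-line fix above uses the standard setuptools mechanism for declaring supported interpreter versions. A trimmed-down sketch of a `setup.py` carrying the same `python_requires` specifier; the package metadata here is placeholder, not cocotb's real manifest:

```python
from setuptools import setup, find_packages

setup(
    name="example-package",  # placeholder name
    version="0.1.0",
    packages=find_packages(),
    # Newer pip releases check this field at install time and refuse
    # to install the package on unsupported interpreters.
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
)
```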
gh_patches_debug_61708
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6595
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: address_zip on strip object
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7356686/
```
Resolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...
File "redirect_urls/middleware.py", line 14, in __call__
resolver_match = self.resolver.resolve(request.path_info)
File "newrelic/hooks/framework_django.py", line 600, in wrapper
return _wrapped(*args, **kwargs)
File "newrelic/hooks/framework_django.py", line 588, in _wrapped
result = wrapped(path)
File "newrelic/hooks/framework_django.py", line 575, in wrapper
return wrapped(*args, **kwargs)
File "django/urls/resolvers.py", line 394, in resolve
raise Resolver404({'tried': tried, 'path': new_path})
KeyError: 'address_zip'
File "stripe/stripe_object.py", line 90, in __getattr__
return self[k]
File "stripe/stripe_object.py", line 131, in __getitem__
raise err
File "stripe/stripe_object.py", line 119, in __getitem__
return super(StripeObject, self).__getitem__(k)
AttributeError: address_zip
(4 additional frame(s) were not displayed)
...
File "django/views/decorators/cache.py", line 57, in _wrapped_view_func
response = view_func(request, *args, **kwargs)
File "kuma/core/decorators.py", line 210, in wrapped
return func(request, *args, **kwargs)
File "kuma/users/views.py", line 476, in user_edit
"subscription_info": retrieve_stripe_subscription_info(edit_user,),
File "kuma/users/utils.py", line 65, in retrieve_stripe_subscription_info
"zip": card.address_zip,
File "stripe/stripe_object.py", line 92, in __getattr__
raise AttributeError(*err.args)
AttributeError: address_zip
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/utils.py`
Content:
```
1 from datetime import datetime
2
3 import stripe
4 from django.conf import settings
5
6
7 def retrieve_stripe_subscription(customer):
8 for subscription in customer.subscriptions.list().auto_paging_iter():
9 # We have to use array indexing syntax, as stripe uses dicts to
10 # represent its objects (dicts come with an .items method)
11 for item in subscription["items"].auto_paging_iter():
12 if item.plan.id == settings.STRIPE_PLAN_ID:
13 return subscription
14
15 return None
16
17
18 def create_stripe_customer_and_subscription_for_user(user, email, stripe_token):
19 customer = (
20 stripe.Customer.retrieve(user.stripe_customer_id)
21 if user.stripe_customer_id
22 else None
23 )
24 if not customer or customer.email != email:
25 customer = stripe.Customer.create(email=email, source=stripe_token,)
26 user.stripe_customer_id = customer.id
27 user.save()
28
29 if retrieve_stripe_subscription(customer) is None:
30 stripe.Subscription.create(
31 customer=customer.id, items=[{"plan": settings.STRIPE_PLAN_ID}],
32 )
33
34
35 def retrieve_stripe_subscription_info(user):
36 stripe_customer = (
37 stripe.Customer.retrieve(user.stripe_customer_id, expand=["default_source"],)
38 if settings.STRIPE_PLAN_ID and user.stripe_customer_id
39 else None
40 )
41
42 stripe_subscription = (
43 retrieve_stripe_subscription(stripe_customer)
44 if stripe_customer and stripe_customer.email == user.email
45 else None
46 )
47 if stripe_subscription:
48 source = stripe_customer.default_source
49 if source.object == "card":
50 card = source
51 elif source.object == "source":
52 card = source.card
53 else:
54 raise ValueError(
55 f"unexpected stripe customer default_source of type {source.object!r}"
56 )
57
58 return {
59 "next_payment_at": datetime.fromtimestamp(
60 stripe_subscription.current_period_end
61 ),
62 "brand": card.brand,
63 "expires_at": f"{card.exp_month}/{card.exp_year}",
64 "last4": card.last4,
65 "zip": card.address_zip,
66 }
67
68 return None
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/users/utils.py b/kuma/users/utils.py
--- a/kuma/users/utils.py
+++ b/kuma/users/utils.py
@@ -62,7 +62,8 @@
"brand": card.brand,
"expires_at": f"{card.exp_month}/{card.exp_year}",
"last4": card.last4,
- "zip": card.address_zip,
+ # Cards that are part of a "source" don't have a zip
+ "zip": card.get("address_zip", None),
}
return None
|
{"golden_diff": "diff --git a/kuma/users/utils.py b/kuma/users/utils.py\n--- a/kuma/users/utils.py\n+++ b/kuma/users/utils.py\n@@ -62,7 +62,8 @@\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n- \"zip\": card.address_zip,\n+ # Cards that are part of a \"source\" don't have a zip\n+ \"zip\": card.get(\"address_zip\", None),\n }\n \n return None\n", "issue": "AttributeError: address_zip on strip object \nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7356686/\n\n```\nResolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...\n File \"redirect_urls/middleware.py\", line 14, in __call__\n resolver_match = self.resolver.resolve(request.path_info)\n File \"newrelic/hooks/framework_django.py\", line 600, in wrapper\n return _wrapped(*args, **kwargs)\n File \"newrelic/hooks/framework_django.py\", line 588, in _wrapped\n result = wrapped(path)\n File \"newrelic/hooks/framework_django.py\", line 575, in wrapper\n return wrapped(*args, **kwargs)\n File \"django/urls/resolvers.py\", line 394, in resolve\n raise Resolver404({'tried': tried, 'path': new_path})\n\nKeyError: 'address_zip'\n File \"stripe/stripe_object.py\", line 90, in __getattr__\n return self[k]\n File \"stripe/stripe_object.py\", line 131, in __getitem__\n raise err\n File \"stripe/stripe_object.py\", line 119, in __getitem__\n return super(StripeObject, self).__getitem__(k)\n\nAttributeError: address_zip\n(4 additional frame(s) were not displayed)\n...\n File \"django/views/decorators/cache.py\", line 57, in _wrapped_view_func\n response = view_func(request, *args, **kwargs)\n File \"kuma/core/decorators.py\", line 210, in wrapped\n return func(request, *args, **kwargs)\n File \"kuma/users/views.py\", line 476, in user_edit\n \"subscription_info\": retrieve_stripe_subscription_info(edit_user,),\n File \"kuma/users/utils.py\", line 65, in retrieve_stripe_subscription_info\n \"zip\": card.address_zip,\n File \"stripe/stripe_object.py\", line 92, in __getattr__\n raise AttributeError(*err.args)\n\nAttributeError: address_zip\n```\n", "before_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n if retrieve_stripe_subscription(customer) is None:\n stripe.Subscription.create(\n 
customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n\ndef retrieve_stripe_subscription_info(user):\n stripe_customer = (\n stripe.Customer.retrieve(user.stripe_customer_id, expand=[\"default_source\"],)\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id\n else None\n )\n\n stripe_subscription = (\n retrieve_stripe_subscription(stripe_customer)\n if stripe_customer and stripe_customer.email == user.email\n else None\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n if source.object == \"card\":\n card = source\n elif source.object == \"source\":\n card = source.card\n else:\n raise ValueError(\n f\"unexpected stripe customer default_source of type {source.object!r}\"\n )\n\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n \"zip\": card.address_zip,\n }\n\n return None\n", "path": "kuma/users/utils.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n if retrieve_stripe_subscription(customer) is None:\n stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n\ndef retrieve_stripe_subscription_info(user):\n stripe_customer = (\n stripe.Customer.retrieve(user.stripe_customer_id, expand=[\"default_source\"],)\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id\n else None\n )\n\n stripe_subscription = (\n retrieve_stripe_subscription(stripe_customer)\n if stripe_customer and stripe_customer.email == user.email\n else None\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n if source.object == \"card\":\n card = source\n elif source.object == \"source\":\n card = source.card\n else:\n raise ValueError(\n f\"unexpected stripe customer default_source of type {source.object!r}\"\n )\n\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n # Cards that are part of a \"source\" don't have a zip\n \"zip\": card.get(\"address_zip\", None),\n }\n\n return None\n", "path": "kuma/users/utils.py"}]}
| 2,517 | 122 |
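The patch above swaps attribute access for a dict-style `.get()` with a default, because the card payload may simply lack the `address_zip` key. The same defensive pattern is shown below on a plain dict standing in for the card object; the real code relies on stripe's dict-like object, while this sketch avoids the stripe dependency entirely:

```python
# A card payload with no postal code, as can happen for cards attached via a "source"
card = {
    "brand": "Visa",
    "exp_month": 4,
    "exp_year": 2025,
    "last4": "4242",
}

# card["address_zip"] (or attribute access on the stripe object) would raise here;
# .get() degrades gracefully to None instead.
subscription_info = {
    "brand": card["brand"],
    "expires_at": f"{card['exp_month']}/{card['exp_year']}",
    "last4": card["last4"],
    "zip": card.get("address_zip", None),
}
print(subscription_info["zip"])  # None
```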
gh_patches_debug_24079
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-9005
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improvement to insertion_sort algorithm
### Feature description
I was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.
These are the things that need improvements:
1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.
We can either use "indexes" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?
2. I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?
3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.
improvement to insertion_sort algorithm
### Feature description
I was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.
These are the things that need improvements:
1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.
We can either use "indexes" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?
2. I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?
3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sorts/insertion_sort.py`
Content:
```
1 """
2 A pure Python implementation of the insertion sort algorithm
3
4 This algorithm sorts a collection by comparing adjacent elements.
5 When it finds that order is not respected, it moves the element compared
6 backward until the order is correct. It then goes back directly to the
7 element's initial position resuming forward comparison.
8
9 For doctests run following command:
10 python3 -m doctest -v insertion_sort.py
11
12 For manual testing run:
13 python3 insertion_sort.py
14 """
15
16
17 def insertion_sort(collection: list) -> list:
18 """A pure Python implementation of the insertion sort algorithm
19
20 :param collection: some mutable ordered collection with heterogeneous
21 comparable items inside
22 :return: the same collection ordered by ascending
23
24 Examples:
25 >>> insertion_sort([0, 5, 3, 2, 2])
26 [0, 2, 2, 3, 5]
27 >>> insertion_sort([]) == sorted([])
28 True
29 >>> insertion_sort([-2, -5, -45]) == sorted([-2, -5, -45])
30 True
31 >>> insertion_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])
32 True
33 >>> import random
34 >>> collection = random.sample(range(-50, 50), 100)
35 >>> insertion_sort(collection) == sorted(collection)
36 True
37 >>> import string
38 >>> collection = random.choices(string.ascii_letters + string.digits, k=100)
39 >>> insertion_sort(collection) == sorted(collection)
40 True
41 """
42
43 for insert_index, insert_value in enumerate(collection[1:]):
44 temp_index = insert_index
45 while insert_index >= 0 and insert_value < collection[insert_index]:
46 collection[insert_index + 1] = collection[insert_index]
47 insert_index -= 1
48 if insert_index != temp_index:
49 collection[insert_index + 1] = insert_value
50 return collection
51
52
53 if __name__ == "__main__":
54 from doctest import testmod
55
56 testmod()
57
58 user_input = input("Enter numbers separated by a comma:\n").strip()
59 unsorted = [int(item) for item in user_input.split(",")]
60 print(f"{insertion_sort(unsorted) = }")
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py
--- a/sorts/insertion_sort.py
+++ b/sorts/insertion_sort.py
@@ -13,8 +13,19 @@
python3 insertion_sort.py
"""
+from collections.abc import MutableSequence
+from typing import Any, Protocol, TypeVar
-def insertion_sort(collection: list) -> list:
+
+class Comparable(Protocol):
+ def __lt__(self, other: Any, /) -> bool:
+ ...
+
+
+T = TypeVar("T", bound=Comparable)
+
+
+def insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]:
"""A pure Python implementation of the insertion sort algorithm
:param collection: some mutable ordered collection with heterogeneous
@@ -40,13 +51,12 @@
True
"""
- for insert_index, insert_value in enumerate(collection[1:]):
- temp_index = insert_index
- while insert_index >= 0 and insert_value < collection[insert_index]:
- collection[insert_index + 1] = collection[insert_index]
+ for insert_index in range(1, len(collection)):
+ insert_value = collection[insert_index]
+ while insert_index > 0 and insert_value < collection[insert_index - 1]:
+ collection[insert_index] = collection[insert_index - 1]
insert_index -= 1
- if insert_index != temp_index:
- collection[insert_index + 1] = insert_value
+ collection[insert_index] = insert_value
return collection
|
{"golden_diff": "diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py\n--- a/sorts/insertion_sort.py\n+++ b/sorts/insertion_sort.py\n@@ -13,8 +13,19 @@\n python3 insertion_sort.py\n \"\"\"\n \n+from collections.abc import MutableSequence\n+from typing import Any, Protocol, TypeVar\n \n-def insertion_sort(collection: list) -> list:\n+\n+class Comparable(Protocol):\n+ def __lt__(self, other: Any, /) -> bool:\n+ ...\n+\n+\n+T = TypeVar(\"T\", bound=Comparable)\n+\n+\n+def insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]:\n \"\"\"A pure Python implementation of the insertion sort algorithm\n \n :param collection: some mutable ordered collection with heterogeneous\n@@ -40,13 +51,12 @@\n True\n \"\"\"\n \n- for insert_index, insert_value in enumerate(collection[1:]):\n- temp_index = insert_index\n- while insert_index >= 0 and insert_value < collection[insert_index]:\n- collection[insert_index + 1] = collection[insert_index]\n+ for insert_index in range(1, len(collection)):\n+ insert_value = collection[insert_index]\n+ while insert_index > 0 and insert_value < collection[insert_index - 1]:\n+ collection[insert_index] = collection[insert_index - 1]\n insert_index -= 1\n- if insert_index != temp_index:\n- collection[insert_index + 1] = insert_value\n+ collection[insert_index] = insert_value\n return collection\n", "issue": "improvement to insertion_sort algorithm\n### Feature description\r\n\r\nI was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.\r\n\r\nThese are the things that need improvements:\r\n\r\n1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.\r\n\r\n We can either use \"indexes\" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?\r\n\r\n2. I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?\r\n\r\n3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.\nimprovement to insertion_sort algorithm\n### Feature description\r\n\r\nI was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.\r\n\r\nThese are the things that need improvements:\r\n\r\n1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.\r\n\r\n We can either use \"indexes\" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?\r\n\r\n2. 
I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?\r\n\r\n3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.\n", "before_files": [{"content": "\"\"\"\nA pure Python implementation of the insertion sort algorithm\n\nThis algorithm sorts a collection by comparing adjacent elements.\nWhen it finds that order is not respected, it moves the element compared\nbackward until the order is correct. It then goes back directly to the\nelement's initial position resuming forward comparison.\n\nFor doctests run following command:\npython3 -m doctest -v insertion_sort.py\n\nFor manual testing run:\npython3 insertion_sort.py\n\"\"\"\n\n\ndef insertion_sort(collection: list) -> list:\n \"\"\"A pure Python implementation of the insertion sort algorithm\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n Examples:\n >>> insertion_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n >>> insertion_sort([]) == sorted([])\n True\n >>> insertion_sort([-2, -5, -45]) == sorted([-2, -5, -45])\n True\n >>> insertion_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])\n True\n >>> import random\n >>> collection = random.sample(range(-50, 50), 100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n >>> import string\n >>> collection = random.choices(string.ascii_letters + string.digits, k=100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n \"\"\"\n\n for insert_index, insert_value in enumerate(collection[1:]):\n temp_index = insert_index\n while insert_index >= 0 and insert_value < collection[insert_index]:\n collection[insert_index + 1] = collection[insert_index]\n insert_index -= 1\n if insert_index != temp_index:\n collection[insert_index + 1] = insert_value\n return collection\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\n unsorted = [int(item) for item in user_input.split(\",\")]\n print(f\"{insertion_sort(unsorted) = }\")\n", "path": "sorts/insertion_sort.py"}], "after_files": [{"content": "\"\"\"\nA pure Python implementation of the insertion sort algorithm\n\nThis algorithm sorts a collection by comparing adjacent elements.\nWhen it finds that order is not respected, it moves the element compared\nbackward until the order is correct. 
It then goes back directly to the\nelement's initial position resuming forward comparison.\n\nFor doctests run following command:\npython3 -m doctest -v insertion_sort.py\n\nFor manual testing run:\npython3 insertion_sort.py\n\"\"\"\n\nfrom collections.abc import MutableSequence\nfrom typing import Any, Protocol, TypeVar\n\n\nclass Comparable(Protocol):\n def __lt__(self, other: Any, /) -> bool:\n ...\n\n\nT = TypeVar(\"T\", bound=Comparable)\n\n\ndef insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]:\n \"\"\"A pure Python implementation of the insertion sort algorithm\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n Examples:\n >>> insertion_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n >>> insertion_sort([]) == sorted([])\n True\n >>> insertion_sort([-2, -5, -45]) == sorted([-2, -5, -45])\n True\n >>> insertion_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])\n True\n >>> import random\n >>> collection = random.sample(range(-50, 50), 100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n >>> import string\n >>> collection = random.choices(string.ascii_letters + string.digits, k=100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n \"\"\"\n\n for insert_index in range(1, len(collection)):\n insert_value = collection[insert_index]\n while insert_index > 0 and insert_value < collection[insert_index - 1]:\n collection[insert_index] = collection[insert_index - 1]\n insert_index -= 1\n collection[insert_index] = insert_value\n return collection\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\n unsorted = [int(item) for item in user_input.split(\",\")]\n print(f\"{insertion_sort(unsorted) = }\")\n", "path": "sorts/insertion_sort.py"}]}
| 1,427 | 353 |
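The rewritten loop in the diff above addresses the first point of the issue by indexing into the collection instead of copying its tail with `collection[1:]`, and it drops the `insert_index != temp_index` guard. A self-contained sketch of that index-based version, kept to plain lists for brevity (the `Protocol`/`TypeVar` typing additions from the diff are omitted here):

```python
def insertion_sort(collection: list) -> list:
    """Sort the list in place by shifting larger items right; returns the same list."""
    for insert_index in range(1, len(collection)):
        insert_value = collection[insert_index]
        # Walk left while the neighbour is larger, shifting it one slot to the right
        while insert_index > 0 and insert_value < collection[insert_index - 1]:
            collection[insert_index] = collection[insert_index - 1]
            insert_index -= 1
        collection[insert_index] = insert_value
    return collection


if __name__ == "__main__":
    print(insertion_sort([0, 5, 3, 2, 2]))  # [0, 2, 2, 3, 5]
```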
gh_patches_debug_39533
|
rasdani/github-patches
|
git_diff
|
searx__searx-2115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get image button from the Google Images engine doesn't give the raw URL to the image
When searching for images using the "Google Images" engine, the "Get image" button doesn't give the raw URL (that usually ends with `.jpg` or `.png`) to the image of every result.
In fact the button gives the same exact URL as the button "View source".
On other engines (like Bing images) it gives the raw URL to the image.
Here is a screenshot explaining the issue:

Here is a URL for testing the bug: https://darmarit.org/searx/?q=%21goi+ok&time_range=&language=en-US&category_images=on
@return42 Any thoughts about that?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/google_images.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """Google (Images)
3
4 :website: https://images.google.com (redirected to subdomain www.)
5 :provide-api: yes (https://developers.google.com/custom-search/)
6 :using-api: not the offical, since it needs registration to another service
7 :results: HTML
8 :stable: no
9 :template: images.html
10 :parse: url, title, content, source, thumbnail_src, img_src
11
12 For detailed description of the *REST-full* API see: `Query Parameter
13 Definitions`_.
14
15 .. _admonition:: Content-Security-Policy (CSP)
16
17 This engine needs to allow images from the `data URLs`_ (prefixed with the
18 ``data:` scheme).::
19
20 Header set Content-Security-Policy "img-src 'self' data: ;"
21
22 .. _Query Parameter Definitions:
23 https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
24
25 """
26
27 from lxml import html
28 from flask_babel import gettext
29 from searx import logger
30 from searx.url_utils import urlencode, urlparse
31 from searx.utils import eval_xpath
32 from searx.engines.xpath import extract_text
33
34 # pylint: disable=unused-import
35 from searx.engines.google import (
36 supported_languages_url,
37 _fetch_supported_languages,
38 )
39 # pylint: enable=unused-import
40
41 from searx.engines.google import (
42 get_lang_country,
43 google_domains,
44 time_range_dict,
45 )
46
47 logger = logger.getChild('google images')
48
49 # engine dependent config
50
51 categories = ['images']
52 paging = False
53 language_support = True
54 use_locale_domain = True
55 time_range_support = True
56 safesearch = True
57
58 filter_mapping = {
59 0: 'images',
60 1: 'active',
61 2: 'active'
62 }
63
64
65 def scrap_out_thumbs(dom):
66 """Scrap out thumbnail data from <script> tags.
67 """
68 ret_val = dict()
69 for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
70 _script = script.text
71 # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
72 _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
73 _thumb_no = _thumb_no.replace("'", "")
74 _img_data = _img_data.replace("'", "")
75 _img_data = _img_data.replace(r"\/", r"/")
76 ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
77 return ret_val
78
79
80 def request(query, params):
81 """Google-Video search request"""
82
83 language, country, lang_country = get_lang_country(
84 # pylint: disable=undefined-variable
85 params, supported_languages, language_aliases
86 )
87 subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')
88
89 query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
90 'q': query,
91 'tbm': "isch",
92 'hl': lang_country,
93 'lr': "lang_" + language,
94 'ie': "utf8",
95 'oe': "utf8",
96 'num': 30,
97 })
98
99 if params['time_range'] in time_range_dict:
100 query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
101 if params['safesearch']:
102 query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
103
104 params['url'] = query_url
105 logger.debug("query_url --> %s", query_url)
106
107 params['headers']['Accept-Language'] = (
108 "%s,%s;q=0.8,%s;q=0.5" % (lang_country, language, language))
109 logger.debug(
110 "HTTP Accept-Language --> %s", params['headers']['Accept-Language'])
111 params['headers']['Accept'] = (
112 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
113 )
114 # params['google_subdomain'] = subdomain
115 return params
116
117
118 def response(resp):
119 """Get response from google's search request"""
120 results = []
121
122 # detect google sorry
123 resp_url = urlparse(resp.url)
124 if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
125 raise RuntimeWarning('sorry.google.com')
126
127 if resp_url.path.startswith('/sorry'):
128 raise RuntimeWarning(gettext('CAPTCHA required'))
129
130 # which subdomain ?
131 # subdomain = resp.search_params.get('google_subdomain')
132
133 # convert the text to dom
134 dom = html.fromstring(resp.text)
135 img_bas64_map = scrap_out_thumbs(dom)
136
137 # parse results
138 #
139 # root element::
140 # <div id="islmp" ..>
141 # result div per image::
142 # <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
143 # The data-id matches to a item in a json-data structure in::
144 # <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
145 # In this structure the ling to the origin PNG, JPG or whatever is given
146 # (we do not blow out the link there, you could still implement that)
147 # first link per image-div contains a <img> with the data-iid for bas64 encoded image data::
148 # <img class="rg_i Q4LuWd" data-iid="0"
149 # second link per image-div is the target link::
150 # <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
151 # the second link also contains two div tags with the *description* and *publisher*::
152 # <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
153 # <div class="fxgdke">en.wikipedia.org</div>
154
155 root = eval_xpath(dom, '//div[@id="islmp"]')
156 if not root:
157 logger.error("did not find root element id='islmp'")
158 return results
159
160 root = root[0]
161 for img_node in eval_xpath(root, './/img[contains(@class, "rg_i")]'):
162
163 try:
164 img_alt = eval_xpath(img_node, '@alt')[0]
165
166 img_base64_id = eval_xpath(img_node, '@data-iid')
167 if img_base64_id:
168 img_base64_id = img_base64_id[0]
169 thumbnail_src = img_bas64_map[img_base64_id]
170 else:
171 thumbnail_src = eval_xpath(img_node, '@src')
172 if not thumbnail_src:
173 thumbnail_src = eval_xpath(img_node, '@data-src')
174 if thumbnail_src:
175 thumbnail_src = thumbnail_src[0]
176 else:
177 thumbnail_src = ''
178
179 link_node = eval_xpath(img_node, '../../../a[2]')[0]
180 url = eval_xpath(link_node, '@href')[0]
181
182 pub_nodes = eval_xpath(link_node, './div/div')
183 pub_descr = img_alt
184 pub_source = ''
185 if pub_nodes:
186 pub_descr = extract_text(pub_nodes[0])
187 pub_source = extract_text(pub_nodes[1])
188
189 results.append({
190 'url': url,
191 'title': img_alt,
192 'content': pub_descr,
193 'source': pub_source,
194 'img_src': url,
195 # 'img_format': img_format,
196 'thumbnail_src': thumbnail_src,
197 'template': 'images.html'
198 })
199 except Exception as e: # pylint: disable=broad-except
200 logger.error(e, exc_info=True)
201 # from lxml import etree
202 # logger.debug(etree.tostring(img_node, pretty_print=True))
203 # import pdb
204 # pdb.set_trace()
205 continue
206
207 return results
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -24,6 +24,7 @@
"""
+import urllib
from lxml import html
from flask_babel import gettext
from searx import logger
@@ -77,6 +78,19 @@
return ret_val
+def scrap_img_by_id(script, data_id):
+ """Get full image URL by data-id in parent element
+ """
+ img_url = ''
+ _script = script.split('\n')
+ for i, line in enumerate(_script):
+ if 'gstatic.com/images' in line and data_id in line:
+ url_line = _script[i + 1]
+ img_url = url_line.split('"')[1]
+ img_url = urllib.parse.unquote(img_url.replace(r'\u00', r'%'))
+ return img_url
+
+
def request(query, params):
"""Google-Video search request"""
@@ -133,6 +147,7 @@
# convert the text to dom
dom = html.fromstring(resp.text)
img_bas64_map = scrap_out_thumbs(dom)
+ img_src_script = eval_xpath(dom, '//script[contains(., "AF_initDataCallback({key: ")]')[1].text
# parse results
#
@@ -142,8 +157,7 @@
# <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
# The data-id matches to a item in a json-data structure in::
# <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
- # In this structure the ling to the origin PNG, JPG or whatever is given
- # (we do not blow out the link there, you could still implement that)
+ # In this structure the link to the origin PNG, JPG or whatever is given
# first link per image-div contains a <img> with the data-iid for bas64 encoded image data::
# <img class="rg_i Q4LuWd" data-iid="0"
# second link per image-div is the target link::
@@ -186,12 +200,17 @@
pub_descr = extract_text(pub_nodes[0])
pub_source = extract_text(pub_nodes[1])
+ img_src_id = eval_xpath(img_node, '../../../@data-id')[0]
+ src_url = scrap_img_by_id(img_src_script, img_src_id)
+ if not src_url:
+ src_url = thumbnail_src
+
results.append({
'url': url,
'title': img_alt,
'content': pub_descr,
'source': pub_source,
- 'img_src': url,
+ 'img_src': src_url,
# 'img_format': img_format,
'thumbnail_src': thumbnail_src,
'template': 'images.html'
|
{"golden_diff": "diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py\n--- a/searx/engines/google_images.py\n+++ b/searx/engines/google_images.py\n@@ -24,6 +24,7 @@\n \n \"\"\"\n \n+import urllib\n from lxml import html\n from flask_babel import gettext\n from searx import logger\n@@ -77,6 +78,19 @@\n return ret_val\n \n \n+def scrap_img_by_id(script, data_id):\n+ \"\"\"Get full image URL by data-id in parent element\n+ \"\"\"\n+ img_url = ''\n+ _script = script.split('\\n')\n+ for i, line in enumerate(_script):\n+ if 'gstatic.com/images' in line and data_id in line:\n+ url_line = _script[i + 1]\n+ img_url = url_line.split('\"')[1]\n+ img_url = urllib.parse.unquote(img_url.replace(r'\\u00', r'%'))\n+ return img_url\n+\n+\n def request(query, params):\n \"\"\"Google-Video search request\"\"\"\n \n@@ -133,6 +147,7 @@\n # convert the text to dom\n dom = html.fromstring(resp.text)\n img_bas64_map = scrap_out_thumbs(dom)\n+ img_src_script = eval_xpath(dom, '//script[contains(., \"AF_initDataCallback({key: \")]')[1].text\n \n # parse results\n #\n@@ -142,8 +157,7 @@\n # <div jsmodel=\"tTXmib\"> / <div jsaction=\"...\" data-id=\"...\"\n # The data-id matches to a item in a json-data structure in::\n # <script nonce=\"I+vqelcy/01CKiBJi5Z1Ow\">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...\n- # In this structure the ling to the origin PNG, JPG or whatever is given\n- # (we do not blow out the link there, you could still implement that)\n+ # In this structure the link to the origin PNG, JPG or whatever is given\n # first link per image-div contains a <img> with the data-iid for bas64 encoded image data::\n # <img class=\"rg_i Q4LuWd\" data-iid=\"0\"\n # second link per image-div is the target link::\n@@ -186,12 +200,17 @@\n pub_descr = extract_text(pub_nodes[0])\n pub_source = extract_text(pub_nodes[1])\n \n+ img_src_id = eval_xpath(img_node, '../../../@data-id')[0]\n+ src_url = scrap_img_by_id(img_src_script, img_src_id)\n+ if not src_url:\n+ src_url = thumbnail_src\n+\n results.append({\n 'url': url,\n 'title': img_alt,\n 'content': pub_descr,\n 'source': pub_source,\n- 'img_src': url,\n+ 'img_src': src_url,\n # 'img_format': img_format,\n 'thumbnail_src': thumbnail_src,\n 'template': 'images.html'\n", "issue": "Get image button from the Google Images engine doesn't give the raw URL to the image\nWhen searching for images using the \"Google Images\" engine, the \"Get image\" button doesn't give the raw URL (that usually ends with `.jpg` or `.png`) to the image of every result.\r\nIn fact the button gives the same exact URL as the button \"View source\".\r\n\r\nOn other engines (like Bing images) it gives the raw URL to the image.\r\n\r\nHere is a screenshot explaining the issue:\r\n\r\n\r\nHere is a URL for testing the bug: https://darmarit.org/searx/?q=%21goi+ok&time_range=&language=en-US&category_images=on\r\n\r\n@return42 Any thoughts about that?\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Google (Images)\n\n:website: https://images.google.com (redirected to subdomain www.)\n:provide-api: yes (https://developers.google.com/custom-search/)\n:using-api: not the offical, since it needs registration to another service\n:results: HTML\n:stable: no\n:template: images.html\n:parse: url, title, content, source, thumbnail_src, img_src\n\nFor detailed description of the *REST-full* API see: `Query Parameter\nDefinitions`_.\n\n.. 
_admonition:: Content-Security-Policy (CSP)\n\n This engine needs to allow images from the `data URLs`_ (prefixed with the\n ``data:` scheme).::\n\n Header set Content-Security-Policy \"img-src 'self' data: ;\"\n\n.. _Query Parameter Definitions:\n https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions\n\n\"\"\"\n\nfrom lxml import html\nfrom flask_babel import gettext\nfrom searx import logger\nfrom searx.url_utils import urlencode, urlparse\nfrom searx.utils import eval_xpath\nfrom searx.engines.xpath import extract_text\n\n# pylint: disable=unused-import\nfrom searx.engines.google import (\n supported_languages_url,\n _fetch_supported_languages,\n)\n# pylint: enable=unused-import\n\nfrom searx.engines.google import (\n get_lang_country,\n google_domains,\n time_range_dict,\n)\n\nlogger = logger.getChild('google images')\n\n# engine dependent config\n\ncategories = ['images']\npaging = False\nlanguage_support = True\nuse_locale_domain = True\ntime_range_support = True\nsafesearch = True\n\nfilter_mapping = {\n 0: 'images',\n 1: 'active',\n 2: 'active'\n}\n\n\ndef scrap_out_thumbs(dom):\n \"\"\"Scrap out thumbnail data from <script> tags.\n \"\"\"\n ret_val = dict()\n for script in eval_xpath(dom, '//script[contains(., \"_setImgSrc(\")]'):\n _script = script.text\n # _setImgSrc('0','data:image\\/jpeg;base64,\\/9j\\/4AAQSkZJR ....');\n _thumb_no, _img_data = _script[len(\"_setImgSrc(\"):-2].split(\",\", 1)\n _thumb_no = _thumb_no.replace(\"'\", \"\")\n _img_data = _img_data.replace(\"'\", \"\")\n _img_data = _img_data.replace(r\"\\/\", r\"/\")\n ret_val[_thumb_no] = _img_data.replace(r\"\\x3d\", \"=\")\n return ret_val\n\n\ndef request(query, params):\n \"\"\"Google-Video search request\"\"\"\n\n language, country, lang_country = get_lang_country(\n # pylint: disable=undefined-variable\n params, supported_languages, language_aliases\n )\n subdomain = 'www.' 
+ google_domains.get(country.upper(), 'google.com')\n\n query_url = 'https://' + subdomain + '/search' + \"?\" + urlencode({\n 'q': query,\n 'tbm': \"isch\",\n 'hl': lang_country,\n 'lr': \"lang_\" + language,\n 'ie': \"utf8\",\n 'oe': \"utf8\",\n 'num': 30,\n })\n\n if params['time_range'] in time_range_dict:\n query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})\n if params['safesearch']:\n query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})\n\n params['url'] = query_url\n logger.debug(\"query_url --> %s\", query_url)\n\n params['headers']['Accept-Language'] = (\n \"%s,%s;q=0.8,%s;q=0.5\" % (lang_country, language, language))\n logger.debug(\n \"HTTP Accept-Language --> %s\", params['headers']['Accept-Language'])\n params['headers']['Accept'] = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n )\n # params['google_subdomain'] = subdomain\n return params\n\n\ndef response(resp):\n \"\"\"Get response from google's search request\"\"\"\n results = []\n\n # detect google sorry\n resp_url = urlparse(resp.url)\n if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':\n raise RuntimeWarning('sorry.google.com')\n\n if resp_url.path.startswith('/sorry'):\n raise RuntimeWarning(gettext('CAPTCHA required'))\n\n # which subdomain ?\n # subdomain = resp.search_params.get('google_subdomain')\n\n # convert the text to dom\n dom = html.fromstring(resp.text)\n img_bas64_map = scrap_out_thumbs(dom)\n\n # parse results\n #\n # root element::\n # <div id=\"islmp\" ..>\n # result div per image::\n # <div jsmodel=\"tTXmib\"> / <div jsaction=\"...\" data-id=\"...\"\n # The data-id matches to a item in a json-data structure in::\n # <script nonce=\"I+vqelcy/01CKiBJi5Z1Ow\">AF_initDataCallback({key: 'ds:1', ... 
data:function(){return [ ...\n # In this structure the ling to the origin PNG, JPG or whatever is given\n # (we do not blow out the link there, you could still implement that)\n # first link per image-div contains a <img> with the data-iid for bas64 encoded image data::\n # <img class=\"rg_i Q4LuWd\" data-iid=\"0\"\n # second link per image-div is the target link::\n # <a class=\"VFACy kGQAp\" href=\"https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper\">\n # the second link also contains two div tags with the *description* and *publisher*::\n # <div class=\"WGvvNb\">The Sacrament of the Last Supper ...</div>\n # <div class=\"fxgdke\">en.wikipedia.org</div>\n\n root = eval_xpath(dom, '//div[@id=\"islmp\"]')\n if not root:\n logger.error(\"did not find root element id='islmp'\")\n return results\n\n root = root[0]\n for img_node in eval_xpath(root, './/img[contains(@class, \"rg_i\")]'):\n\n try:\n img_alt = eval_xpath(img_node, '@alt')[0]\n\n img_base64_id = eval_xpath(img_node, '@data-iid')\n if img_base64_id:\n img_base64_id = img_base64_id[0]\n thumbnail_src = img_bas64_map[img_base64_id]\n else:\n thumbnail_src = eval_xpath(img_node, '@src')\n if not thumbnail_src:\n thumbnail_src = eval_xpath(img_node, '@data-src')\n if thumbnail_src:\n thumbnail_src = thumbnail_src[0]\n else:\n thumbnail_src = ''\n\n link_node = eval_xpath(img_node, '../../../a[2]')[0]\n url = eval_xpath(link_node, '@href')[0]\n\n pub_nodes = eval_xpath(link_node, './div/div')\n pub_descr = img_alt\n pub_source = ''\n if pub_nodes:\n pub_descr = extract_text(pub_nodes[0])\n pub_source = extract_text(pub_nodes[1])\n\n results.append({\n 'url': url,\n 'title': img_alt,\n 'content': pub_descr,\n 'source': pub_source,\n 'img_src': url,\n # 'img_format': img_format,\n 'thumbnail_src': thumbnail_src,\n 'template': 'images.html'\n })\n except Exception as e: # pylint: disable=broad-except\n logger.error(e, exc_info=True)\n # from lxml import etree\n # logger.debug(etree.tostring(img_node, pretty_print=True))\n # import pdb\n # pdb.set_trace()\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Google (Images)\n\n:website: https://images.google.com (redirected to subdomain www.)\n:provide-api: yes (https://developers.google.com/custom-search/)\n:using-api: not the offical, since it needs registration to another service\n:results: HTML\n:stable: no\n:template: images.html\n:parse: url, title, content, source, thumbnail_src, img_src\n\nFor detailed description of the *REST-full* API see: `Query Parameter\nDefinitions`_.\n\n.. _admonition:: Content-Security-Policy (CSP)\n\n This engine needs to allow images from the `data URLs`_ (prefixed with the\n ``data:` scheme).::\n\n Header set Content-Security-Policy \"img-src 'self' data: ;\"\n\n.. 
_Query Parameter Definitions:\n https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions\n\n\"\"\"\n\nimport urllib\nfrom lxml import html\nfrom flask_babel import gettext\nfrom searx import logger\nfrom searx.url_utils import urlencode, urlparse\nfrom searx.utils import eval_xpath\nfrom searx.engines.xpath import extract_text\n\n# pylint: disable=unused-import\nfrom searx.engines.google import (\n supported_languages_url,\n _fetch_supported_languages,\n)\n# pylint: enable=unused-import\n\nfrom searx.engines.google import (\n get_lang_country,\n google_domains,\n time_range_dict,\n)\n\nlogger = logger.getChild('google images')\n\n# engine dependent config\n\ncategories = ['images']\npaging = False\nlanguage_support = True\nuse_locale_domain = True\ntime_range_support = True\nsafesearch = True\n\nfilter_mapping = {\n 0: 'images',\n 1: 'active',\n 2: 'active'\n}\n\n\ndef scrap_out_thumbs(dom):\n \"\"\"Scrap out thumbnail data from <script> tags.\n \"\"\"\n ret_val = dict()\n for script in eval_xpath(dom, '//script[contains(., \"_setImgSrc(\")]'):\n _script = script.text\n # _setImgSrc('0','data:image\\/jpeg;base64,\\/9j\\/4AAQSkZJR ....');\n _thumb_no, _img_data = _script[len(\"_setImgSrc(\"):-2].split(\",\", 1)\n _thumb_no = _thumb_no.replace(\"'\", \"\")\n _img_data = _img_data.replace(\"'\", \"\")\n _img_data = _img_data.replace(r\"\\/\", r\"/\")\n ret_val[_thumb_no] = _img_data.replace(r\"\\x3d\", \"=\")\n return ret_val\n\n\ndef scrap_img_by_id(script, data_id):\n \"\"\"Get full image URL by data-id in parent element\n \"\"\"\n img_url = ''\n _script = script.split('\\n')\n for i, line in enumerate(_script):\n if 'gstatic.com/images' in line and data_id in line:\n url_line = _script[i + 1]\n img_url = url_line.split('\"')[1]\n img_url = urllib.parse.unquote(img_url.replace(r'\\u00', r'%'))\n return img_url\n\n\ndef request(query, params):\n \"\"\"Google-Video search request\"\"\"\n\n language, country, lang_country = get_lang_country(\n # pylint: disable=undefined-variable\n params, supported_languages, language_aliases\n )\n subdomain = 'www.' 
+ google_domains.get(country.upper(), 'google.com')\n\n query_url = 'https://' + subdomain + '/search' + \"?\" + urlencode({\n 'q': query,\n 'tbm': \"isch\",\n 'hl': lang_country,\n 'lr': \"lang_\" + language,\n 'ie': \"utf8\",\n 'oe': \"utf8\",\n 'num': 30,\n })\n\n if params['time_range'] in time_range_dict:\n query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})\n if params['safesearch']:\n query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})\n\n params['url'] = query_url\n logger.debug(\"query_url --> %s\", query_url)\n\n params['headers']['Accept-Language'] = (\n \"%s,%s;q=0.8,%s;q=0.5\" % (lang_country, language, language))\n logger.debug(\n \"HTTP Accept-Language --> %s\", params['headers']['Accept-Language'])\n params['headers']['Accept'] = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n )\n # params['google_subdomain'] = subdomain\n return params\n\n\ndef response(resp):\n \"\"\"Get response from google's search request\"\"\"\n results = []\n\n # detect google sorry\n resp_url = urlparse(resp.url)\n if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':\n raise RuntimeWarning('sorry.google.com')\n\n if resp_url.path.startswith('/sorry'):\n raise RuntimeWarning(gettext('CAPTCHA required'))\n\n # which subdomain ?\n # subdomain = resp.search_params.get('google_subdomain')\n\n # convert the text to dom\n dom = html.fromstring(resp.text)\n img_bas64_map = scrap_out_thumbs(dom)\n img_src_script = eval_xpath(dom, '//script[contains(., \"AF_initDataCallback({key: \")]')[1].text\n\n # parse results\n #\n # root element::\n # <div id=\"islmp\" ..>\n # result div per image::\n # <div jsmodel=\"tTXmib\"> / <div jsaction=\"...\" data-id=\"...\"\n # The data-id matches to a item in a json-data structure in::\n # <script nonce=\"I+vqelcy/01CKiBJi5Z1Ow\">AF_initDataCallback({key: 'ds:1', ... 
data:function(){return [ ...\n # In this structure the link to the origin PNG, JPG or whatever is given\n # first link per image-div contains a <img> with the data-iid for bas64 encoded image data::\n # <img class=\"rg_i Q4LuWd\" data-iid=\"0\"\n # second link per image-div is the target link::\n # <a class=\"VFACy kGQAp\" href=\"https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper\">\n # the second link also contains two div tags with the *description* and *publisher*::\n # <div class=\"WGvvNb\">The Sacrament of the Last Supper ...</div>\n # <div class=\"fxgdke\">en.wikipedia.org</div>\n\n root = eval_xpath(dom, '//div[@id=\"islmp\"]')\n if not root:\n logger.error(\"did not find root element id='islmp'\")\n return results\n\n root = root[0]\n for img_node in eval_xpath(root, './/img[contains(@class, \"rg_i\")]'):\n\n try:\n img_alt = eval_xpath(img_node, '@alt')[0]\n\n img_base64_id = eval_xpath(img_node, '@data-iid')\n if img_base64_id:\n img_base64_id = img_base64_id[0]\n thumbnail_src = img_bas64_map[img_base64_id]\n else:\n thumbnail_src = eval_xpath(img_node, '@src')\n if not thumbnail_src:\n thumbnail_src = eval_xpath(img_node, '@data-src')\n if thumbnail_src:\n thumbnail_src = thumbnail_src[0]\n else:\n thumbnail_src = ''\n\n link_node = eval_xpath(img_node, '../../../a[2]')[0]\n url = eval_xpath(link_node, '@href')[0]\n\n pub_nodes = eval_xpath(link_node, './div/div')\n pub_descr = img_alt\n pub_source = ''\n if pub_nodes:\n pub_descr = extract_text(pub_nodes[0])\n pub_source = extract_text(pub_nodes[1])\n\n img_src_id = eval_xpath(img_node, '../../../@data-id')[0]\n src_url = scrap_img_by_id(img_src_script, img_src_id)\n if not src_url:\n src_url = thumbnail_src\n\n results.append({\n 'url': url,\n 'title': img_alt,\n 'content': pub_descr,\n 'source': pub_source,\n 'img_src': src_url,\n # 'img_format': img_format,\n 'thumbnail_src': thumbnail_src,\n 'template': 'images.html'\n })\n except Exception as e: # pylint: disable=broad-except\n logger.error(e, exc_info=True)\n # from lxml import etree\n # logger.debug(etree.tostring(img_node, pretty_print=True))\n # import pdb\n # pdb.set_trace()\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}]}
| 2,791 | 707 |
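As a side note on the patch above, here is a hedged, standalone sketch of the line-scanning idea behind `scrap_img_by_id`: the full-size URL is read from the line after the one that carries both the `gstatic.com/images` thumbnail and the result's `data-id`. The sample payload below is hand-made for illustration; the real text comes from the second `AF_initDataCallback` script tag and is not guaranteed to keep this exact shape.

```python
import urllib.parse


def scrap_img_by_id(script: str, data_id: str) -> str:
    """Return the full-size image URL for a given result data-id."""
    img_url = ''
    lines = script.split('\n')
    for i, line in enumerate(lines):
        # The thumbnail line carries the data-id; the origin URL sits on the next line.
        if 'gstatic.com/images' in line and data_id in line:
            url_line = lines[i + 1]
            img_url = url_line.split('"')[1]
            img_url = urllib.parse.unquote(img_url.replace(r'\u00', r'%'))
    return img_url


# Hand-made stand-in for the real AF_initDataCallback payload (illustration only).
sample = '\n'.join([
    '["abc123",["https://encrypted-tbn0.gstatic.com/images?q=tbn:thumb",183,275],',
    '["https://upload.wikimedia.org/wikipedia/commons/full_size.jpg",1200,1800],',
])
print(scrap_img_by_id(sample, 'abc123'))
# https://upload.wikimedia.org/wikipedia/commons/full_size.jpg
```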
gh_patches_debug_893
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-665
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageEmbedder default behavior is not a flattened output
## 🐛 Bug
I discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor.
My understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html) is they expect the embeddings to be 1D (for each embedding).
The reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X.
### To Reproduce
Steps to reproduce the behavior:
Run the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example.
Note: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder.
#### Code sample
```python
from flash.core.data.utils import download_data
from flash.image import ImageEmbedder
# 1. Download the data
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
# 2. Create an ImageEmbedder with resnet50 trained on imagenet.
embedder = ImageEmbedder(backbone="resnet50")
# 3. Generate an embedding from an image path.
embeddings = embedder.predict("data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg")
# 4. Print embeddings shape
print(embeddings.shape)
```
### Expected behavior
Expect to see a 100352x1 shape tensor as the output, instead of 2048x7x7.
### Environment
- PyTorch Version (e.g., 1.0): 1.9
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, source): pip
- Build command you used (if compiling from source): N/A
- Python version: 3.8.6
- CUDA/cuDNN version: N/A
- GPU models and configuration: N/A
- Any other relevant information: N/A
### Additional context
I believe the question is around what the logic should be here:
https://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92
If `embedding_dim` is None, then the head is `nn.Identity()`. **If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?**
It could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with "[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)"
Let me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash_examples/integrations/fiftyone/image_embedding.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import fiftyone as fo
15 import fiftyone.brain as fob
16 import numpy as np
17
18 from flash.core.data.utils import download_data
19 from flash.image import ImageEmbedder
20
21 # 1 Download data
22 download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip")
23
24 # 2 Load data into FiftyOne
25 dataset = fo.Dataset.from_dir(
26 "data/hymenoptera_data/test/",
27 fo.types.ImageClassificationDirectoryTree,
28 )
29
30 # 3 Load model
31 embedder = ImageEmbedder(backbone="resnet101", embedding_dim=128)
32
33 # 4 Generate embeddings
34 filepaths = dataset.values("filepath")
35 embeddings = np.stack(embedder.predict(filepaths))
36
37 # 5 Visualize in FiftyOne App
38 results = fob.compute_visualization(dataset, embeddings=embeddings)
39 session = fo.launch_app(dataset)
40 plot = results.visualize(labels="ground_truth.label")
41 plot.show()
42
43 # Optional: block execution until App is closed
44 session.wait()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash_examples/integrations/fiftyone/image_embedding.py b/flash_examples/integrations/fiftyone/image_embedding.py
--- a/flash_examples/integrations/fiftyone/image_embedding.py
+++ b/flash_examples/integrations/fiftyone/image_embedding.py
@@ -28,7 +28,7 @@
)
# 3 Load model
-embedder = ImageEmbedder(backbone="resnet101", embedding_dim=128)
+embedder = ImageEmbedder(backbone="resnet101")
# 4 Generate embeddings
filepaths = dataset.values("filepath")
|
{"golden_diff": "diff --git a/flash_examples/integrations/fiftyone/image_embedding.py b/flash_examples/integrations/fiftyone/image_embedding.py\n--- a/flash_examples/integrations/fiftyone/image_embedding.py\n+++ b/flash_examples/integrations/fiftyone/image_embedding.py\n@@ -28,7 +28,7 @@\n )\n \n # 3 Load model\n-embedder = ImageEmbedder(backbone=\"resnet101\", embedding_dim=128)\n+embedder = ImageEmbedder(backbone=\"resnet101\")\n \n # 4 Generate embeddings\n filepaths = dataset.values(\"filepath\")\n", "issue": "ImageEmbedder default behavior is not a flattened output\n## \ud83d\udc1b Bug\r\n\r\nI discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor. \r\nMy understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html) is they expect the embeddings to be 1D (for each embedding). \r\n\r\nThe reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X.\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRun the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example.\r\n\r\nNote: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder. \r\n\r\n\r\n#### Code sample\r\n```python\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.image import ImageEmbedder\r\n\r\n# 1. Download the data\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n\r\n# 2. Create an ImageEmbedder with resnet50 trained on imagenet.\r\nembedder = ImageEmbedder(backbone=\"resnet50\")\r\n\r\n# 3. Generate an embedding from an image path.\r\nembeddings = embedder.predict(\"data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg\")\r\n\r\n# 4. Print embeddings shape\r\nprint(embeddings.shape)\r\n```\r\n\r\n### Expected behavior\r\n\r\nExpect to see a 100352x1 shape tensor as the output, instead of 2048x7x7. \r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0): 1.9\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source): N/A\r\n - Python version: 3.8.6\r\n - CUDA/cuDNN version: N/A\r\n - GPU models and configuration: N/A\r\n - Any other relevant information: N/A\r\n\r\n### Additional context\r\n\r\nI believe the question is around what the logic should be here:\r\nhttps://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92\r\n\r\nIf `embedding_dim` is None, then the head is `nn.Identity()`. 
**If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?**\r\n\r\nIt could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with \"[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)\"\r\n\r\nLet me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR. \r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fiftyone as fo\nimport fiftyone.brain as fob\nimport numpy as np\n\nfrom flash.core.data.utils import download_data\nfrom flash.image import ImageEmbedder\n\n# 1 Download data\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\")\n\n# 2 Load data into FiftyOne\ndataset = fo.Dataset.from_dir(\n \"data/hymenoptera_data/test/\",\n fo.types.ImageClassificationDirectoryTree,\n)\n\n# 3 Load model\nembedder = ImageEmbedder(backbone=\"resnet101\", embedding_dim=128)\n\n# 4 Generate embeddings\nfilepaths = dataset.values(\"filepath\")\nembeddings = np.stack(embedder.predict(filepaths))\n\n# 5 Visualize in FiftyOne App\nresults = fob.compute_visualization(dataset, embeddings=embeddings)\nsession = fo.launch_app(dataset)\nplot = results.visualize(labels=\"ground_truth.label\")\nplot.show()\n\n# Optional: block execution until App is closed\nsession.wait()\n", "path": "flash_examples/integrations/fiftyone/image_embedding.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fiftyone as fo\nimport fiftyone.brain as fob\nimport numpy as np\n\nfrom flash.core.data.utils import download_data\nfrom flash.image import ImageEmbedder\n\n# 1 Download data\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\")\n\n# 2 Load data into FiftyOne\ndataset = fo.Dataset.from_dir(\n \"data/hymenoptera_data/test/\",\n fo.types.ImageClassificationDirectoryTree,\n)\n\n# 3 Load model\nembedder = ImageEmbedder(backbone=\"resnet101\")\n\n# 4 Generate embeddings\nfilepaths = dataset.values(\"filepath\")\nembeddings = np.stack(embedder.predict(filepaths))\n\n# 5 Visualize in FiftyOne App\nresults = fob.compute_visualization(dataset, embeddings=embeddings)\nsession = 
fo.launch_app(dataset)\nplot = results.visualize(labels=\"ground_truth.label\")\nplot.show()\n\n# Optional: block execution until App is closed\nsession.wait()\n", "path": "flash_examples/integrations/fiftyone/image_embedding.py"}]}
| 1,537 | 134 |
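To make the shape question in that issue concrete, a small sketch follows. The tensor is synthetic and only mirrors the 2048x7x7 feature map the report describes; whether flattening belongs in the model head or in user code is exactly the open question above.

```python
import torch
from torch import nn

# Synthetic stand-in for what a resnet101-style backbone emits for one image.
features = torch.randn(1, 2048, 7, 7)

identity_head = nn.Identity()  # current default: keeps the 3D feature map
flatten_head = nn.Flatten()    # proposed: one flat 1D embedding per image

print(identity_head(features).shape)  # torch.Size([1, 2048, 7, 7])
print(flatten_head(features).shape)   # torch.Size([1, 100352])
```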
gh_patches_debug_24455
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5803
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'AnonymousUserMixin' object has no attribute 'email' on updating an event
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
We are facing this Attribute Error the moment we try updating any event related object which corresponds to a celery task
**To Reproduce**
Steps to reproduce the behavior:
1. Update any existing event
2. See error
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
It should work normally without any error
**Stacktrace**
<!-- If applicable, add stacktrace to help explain your problem. -->
```cmd
AttributeError: 'AnonymousUserMixin' object has no attribute 'email'
File "/home/shreyansh/open-event-server/venv/lib/python3.6/site-packages/flask/views.py", line 88, in view
return self.dispatch_request(*args, **kwargs)
File "/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/resource.py", line 68, in dispatch_request
response = method(*args, **kwargs)
File "/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/decorators.py", line 56, in wrapper
return func(*args, **kwargs)
File "/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/resource.py", line 311, in patch
self._data_layer.update_object(obj, data, kwargs)
File "/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/data_layers/alchemy.py", line 144, in update_object
self.after_update_object(obj, data, view_kwargs)
File "/home/shreyansh/open-event-server/app/api/events.py", line 485, in after_update_object
start_export_tasks(event)
File "/home/shreyansh/open-event-server/app/api/events.py", line 553, in start_export_tasks
create_export_job(task_xcal.id, event_id)
```
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu
- Python Version [e.g. `3.5`, `3.6`] 3.5
**Additional context**
<!-- Add any other context about the problem here. -->
Working on it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/export_helpers.py`
Content:
```
1 import json
2 import os
3 import shutil
4 from collections import OrderedDict
5 from datetime import datetime
6
7 import pytz
8 import requests
9 from flask import current_app as app
10 from flask import request, url_for
11 from flask_jwt import current_identity
12 from flask_login import current_user
13
14 from app.api.helpers.db import save_to_db
15 from app.api.helpers.storage import upload, UPLOAD_PATHS, UploadedFile
16 from app.api.helpers.utilities import is_downloadable, get_filename_from_cd
17 from app.models import db
18 from app.models.custom_form import CustomForms
19 from app.models.event import Event
20 from app.models.export_job import ExportJob
21 from app.models.microlocation import Microlocation
22 from app.models.session import Session
23 from app.models.session_type import SessionType
24 from app.models.speaker import Speaker
25 from app.models.sponsor import Sponsor
26 from app.models.track import Track
27
28 # order of keys in export json
29 FIELD_ORDER = {
30 'event': [
31 'id', 'name', 'latitude', 'longitude', 'location_name', 'starts_at', 'ends_at',
32 'timezone', 'description', 'original_image_url', 'logo_url', 'organizer_name',
33 'organizer_description', 'external_event_url', 'ticket_url', 'privacy', 'event_type_id',
34 'event_topic_id', 'event_sub_topic_id', 'code_of_conduct'
35 ],
36 'microlocations': ['id', 'name', 'floor'],
37 'sessions': [
38 'id', 'title', 'subtitle', 'short_abstract', 'long_abstract', 'starts_at', 'ends_at',
39 'session_type_id', 'track_id', 'comments', 'language', 'slides_url', 'audio_url', 'video_url'
40 ],
41 'speakers': [
42 'id', 'name', 'email', 'mobile', 'photo_url', 'organisation', 'position', 'country',
43 'short_biography', 'long_biography', 'website', 'twitter', 'facebook', 'github', 'linkedin'
44 ],
45 'sponsors': ['id', 'name', 'logo_url', 'level', 'type', 'url', 'description'],
46 'tracks': ['id', 'name', 'color', 'font_color'],
47 'session_types': ['id', 'name', 'length'],
48 'forms': []
49 }
50
51 # keep sync with storage.UPLOAD_PATHS
52 DOWNLOAD_FIEDLS = {
53 'sessions': {
54 'video_url': ['video', '/videos/session_%d'],
55 'audio_url': ['audio', '/audios/session_%d'],
56 'slides_url': ['document', '/slides/session_%d']
57 },
58 'speakers': {
59 'photo_url': ['image', '/images/speakers/%s_%d']
60 },
61 'event': {
62 'logo_url': ['image', '/images/logo'],
63 'external_event_url': ['image', '/images/background']
64 },
65 'sponsors': {
66 'logo_url': ['image', '/images/sponsors/%s_%d']
67 }
68 }
69
70 DATE_FIELDS = ['starts_at', 'ends_at', 'created_at', 'deleted_at', 'submitted_at']
71
72 EXPORTS = [
73 ('event', Event),
74 ('microlocations', Microlocation),
75 ('sessions', Session),
76 ('speakers', Speaker),
77 ('sponsors', Sponsor),
78 ('tracks', Track),
79 ('session_types', SessionType),
80 ('forms', CustomForms)
81 ]
82
83 # strings to remove in a filename
84 FILENAME_EXCLUDE = '<>:"/\|?*;'
85
86
87 # FUNCTIONS
88
89 def sorted_dict(data):
90 """
91 sorts a json (dict/list->dict) and returns OrderedDict
92 """
93 if type(data) == OrderedDict:
94 data = dict(data)
95 if type(data) == dict:
96 data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))
97 elif type(data) == list:
98 for count in range(len(data)):
99 data[count] = OrderedDict(sorted(list(data[count].items()), key=lambda t: t[0]))
100 return data
101
102
103 def _order_json(data, srv):
104 """
105 sorts the data a/c FIELD_ORDER and returns.
106 If some keys are not included in FIELD_ORDER, they go at last, sorted alphabetically
107 """
108 new_data = OrderedDict()
109 data.pop('_sa_instance_state', None)
110 for field in FIELD_ORDER[srv[0]]:
111 if field in DATE_FIELDS and data[field] and type(data[field]) != str:
112 new_data[field] = sorted_dict(data[field].isoformat())
113 elif field == 'font_color' and 'id' in new_data:
114 track = db.session.query(Track).filter(Track.id == new_data['id']).first()
115 new_data[field] = track.font_color
116 else:
117 new_data[field] = sorted_dict(data[field])
118 data.pop(field, None)
119
120 # remaining fields, sort and add
121 # https://docs.python.org/2/library/collections.html#collections.OrderedDict
122 data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))
123 for key in data:
124 if key in DATE_FIELDS and data[key] and type(data[key]) != str:
125 new_data[key] = sorted_dict(data[key].isoformat())
126 else:
127 new_data[key] = sorted_dict(data[key])
128
129 return new_data
130
131
132 def _download_media(data, srv, dir_path, settings):
133 """
134 Downloads the media and saves it
135 """
136 if srv not in DOWNLOAD_FIEDLS:
137 return
138 for i in DOWNLOAD_FIEDLS[srv]:
139 if not data[i]:
140 continue
141 if not settings[DOWNLOAD_FIEDLS[srv][i][0]]:
142 continue
143 path = DOWNLOAD_FIEDLS[srv][i][1]
144 if srv == 'speakers':
145 path %= make_filename(data['name']), data['id']
146 elif srv == 'sponsors':
147 path %= make_filename(data['name']), data['id']
148 elif srv != 'event':
149 path = path % (data['id'])
150 if data[i].find('.') > -1: # add extension
151 ext = data[i].rsplit('.', 1)[1]
152 if ext.find('/') == -1:
153 path += '.' + ext
154 full_path = dir_path + path
155 # make dir
156 cdir = full_path.rsplit('/', 1)[0]
157 if not os.path.isdir(cdir):
158 os.makedirs(cdir)
159 # download and set
160 url = data[i]
161 if not is_downloadable(url):
162 continue
163 try:
164 r = requests.get(url, allow_redirects=True)
165 ext = get_filename_from_cd(r.headers.get('content-disposition'))[1]
166 full_path += ext
167 path += ext
168 open(full_path, 'wb').write(r.content)
169 data[i] = path
170 except Exception:
171 pass
172
173
174 def _generate_meta():
175 """
176 Generate Meta information for export
177 """
178 d = {'root_url': request.url_root}
179 return d
180
181
182 def export_event_json(event_id, settings):
183 """
184 Exports the event as a zip on the server and return its path
185 """
186 # make directory
187 exports_dir = app.config['BASE_DIR'] + '/static/uploads/exports/'
188 if not os.path.isdir(exports_dir):
189 os.makedirs(exports_dir)
190 dir_path = exports_dir + 'event%d' % int(event_id)
191 if os.path.isdir(dir_path):
192 shutil.rmtree(dir_path, ignore_errors=True)
193 os.makedirs(dir_path)
194 # save to directory
195 for e in EXPORTS:
196 if e[0] == 'event':
197 query_obj = db.session.query(e[1]).filter(
198 e[1].id == event_id).first()
199 data = _order_json(dict(query_obj.__dict__), e)
200 _download_media(data, 'event', dir_path, settings)
201 else:
202 query_objs = db.session.query(e[1]).filter(
203 e[1].event_id == event_id).all()
204 data = [_order_json(dict(query_obj.__dict__), e) for query_obj in query_objs]
205 for count in range(len(data)):
206 data[count] = _order_json(data[count], e)
207 _download_media(data[count], e[0], dir_path, settings)
208 data_str = json.dumps(data, indent=4, ensure_ascii=False, default=handle_unserializable_data).encode('utf-8')
209 fp = open(dir_path + '/' + e[0], 'w')
210 fp.write(str(data_str, 'utf-8'))
211 fp.close()
212 # add meta
213 data_str = json.dumps(
214 _generate_meta(), sort_keys=True,
215 indent=4, ensure_ascii=False
216 ).encode('utf-8')
217 fp = open(dir_path + '/meta', 'w')
218 fp.write(str(data_str, 'utf-8'))
219 fp.close()
220 # make zip
221 shutil.make_archive(dir_path, 'zip', dir_path)
222 dir_path = dir_path + ".zip"
223
224 storage_path = UPLOAD_PATHS['exports']['zip'].format(
225 event_id=event_id
226 )
227 uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])
228 storage_url = upload(uploaded_file, storage_path)
229
230 return storage_url
231
232
233 # HELPERS
234
235 def create_export_job(task_id, event_id):
236 """
237 Create export job for an export that is going to start
238 """
239 export_job = ExportJob.query.filter_by(event_id=event_id).first()
240 task_url = url_for('tasks.celery_task', task_id=task_id)
241 if export_job:
242
243 export_job.task = task_url
244 export_job.user_email = current_user.email
245 export_job.event = Event.query.get(event_id)
246 export_job.starts_at = datetime.now(pytz.utc)
247 else:
248 export_job = ExportJob(
249 task=task_url, user_email=current_identity.email,
250 event=Event.query.get(event_id)
251 )
252 save_to_db(export_job, 'ExportJob saved')
253
254
255 # FIELD DATA FORMATTERS
256 def make_filename(name):
257 """Make speaker image filename for export"""
258 for _ in FILENAME_EXCLUDE:
259 name = name.replace(_, ' ')
260 return ''.join(s.title() for s in name.split() if s)
261
262
263 def handle_unserializable_data(obj):
264 """
265 Handles objects which cannot be serialized by json.dumps()
266 :param obj: Object to be serialized
267 :return: JSON representation of the object
268 """
269 if isinstance(obj, datetime):
270 return obj.__str__()
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/export_helpers.py b/app/api/helpers/export_helpers.py
--- a/app/api/helpers/export_helpers.py
+++ b/app/api/helpers/export_helpers.py
@@ -230,6 +230,13 @@
return storage_url
+def get_current_user():
+ if current_identity:
+ return current_identity
+ else:
+ return current_user
+
+
# HELPERS
def create_export_job(task_id, event_id):
@@ -238,15 +245,17 @@
"""
export_job = ExportJob.query.filter_by(event_id=event_id).first()
task_url = url_for('tasks.celery_task', task_id=task_id)
+ current_logged_user = get_current_user()
+
if export_job:
export_job.task = task_url
- export_job.user_email = current_user.email
+ export_job.user_email = current_logged_user.email
export_job.event = Event.query.get(event_id)
export_job.starts_at = datetime.now(pytz.utc)
else:
export_job = ExportJob(
- task=task_url, user_email=current_identity.email,
+ task=task_url, user_email=current_logged_user.email,
event=Event.query.get(event_id)
)
save_to_db(export_job, 'ExportJob saved')
|
{"golden_diff": "diff --git a/app/api/helpers/export_helpers.py b/app/api/helpers/export_helpers.py\n--- a/app/api/helpers/export_helpers.py\n+++ b/app/api/helpers/export_helpers.py\n@@ -230,6 +230,13 @@\n return storage_url\n \n \n+def get_current_user():\n+ if current_identity:\n+ return current_identity\n+ else:\n+ return current_user\n+\n+\n # HELPERS\n \n def create_export_job(task_id, event_id):\n@@ -238,15 +245,17 @@\n \"\"\"\n export_job = ExportJob.query.filter_by(event_id=event_id).first()\n task_url = url_for('tasks.celery_task', task_id=task_id)\n+ current_logged_user = get_current_user()\n+\n if export_job:\n \n export_job.task = task_url\n- export_job.user_email = current_user.email\n+ export_job.user_email = current_logged_user.email\n export_job.event = Event.query.get(event_id)\n export_job.starts_at = datetime.now(pytz.utc)\n else:\n export_job = ExportJob(\n- task=task_url, user_email=current_identity.email,\n+ task=task_url, user_email=current_logged_user.email,\n event=Event.query.get(event_id)\n )\n save_to_db(export_job, 'ExportJob saved')\n", "issue": " 'AnonymousUserMixin' object has no attribute 'email' on updating an event\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nWe are facing this Attribute Error the moment we try updating any event related object which corresponds to a celery task\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Update any existing event\r\n2. See error\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nIt should work normally without any error\r\n\r\n**Stacktrace**\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n```cmd\r\nAttributeError: 'AnonymousUserMixin' object has no attribute 'email'\r\n\r\nFile \"/home/shreyansh/open-event-server/venv/lib/python3.6/site-packages/flask/views.py\", line 88, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/resource.py\", line 68, in dispatch_request\r\n response = method(*args, **kwargs)\r\n File \"/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/decorators.py\", line 56, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/resource.py\", line 311, in patch\r\n self._data_layer.update_object(obj, data, kwargs)\r\n File \"/home/shreyansh/open-event-server/venv/src/flask-rest-jsonapi/flask_rest_jsonapi/data_layers/alchemy.py\", line 144, in update_object\r\n self.after_update_object(obj, data, view_kwargs)\r\n File \"/home/shreyansh/open-event-server/app/api/events.py\", line 485, in after_update_object\r\n start_export_tasks(event)\r\n File \"/home/shreyansh/open-event-server/app/api/events.py\", line 553, in start_export_tasks\r\n create_export_job(task_xcal.id, event_id)\r\n```\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu\r\n - Python Version [e.g. `3.5`, `3.6`] 3.5\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\nWorking on it\n", "before_files": [{"content": "import json\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nimport pytz\nimport requests\nfrom flask import current_app as app\nfrom flask import request, url_for\nfrom flask_jwt import current_identity\nfrom flask_login import current_user\n\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.storage import upload, UPLOAD_PATHS, UploadedFile\nfrom app.api.helpers.utilities import is_downloadable, get_filename_from_cd\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.models.export_job import ExportJob\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.track import Track\n\n# order of keys in export json\nFIELD_ORDER = {\n 'event': [\n 'id', 'name', 'latitude', 'longitude', 'location_name', 'starts_at', 'ends_at',\n 'timezone', 'description', 'original_image_url', 'logo_url', 'organizer_name',\n 'organizer_description', 'external_event_url', 'ticket_url', 'privacy', 'event_type_id',\n 'event_topic_id', 'event_sub_topic_id', 'code_of_conduct'\n ],\n 'microlocations': ['id', 'name', 'floor'],\n 'sessions': [\n 'id', 'title', 'subtitle', 'short_abstract', 'long_abstract', 'starts_at', 'ends_at',\n 'session_type_id', 'track_id', 'comments', 'language', 'slides_url', 'audio_url', 'video_url'\n ],\n 'speakers': [\n 'id', 'name', 'email', 'mobile', 'photo_url', 'organisation', 'position', 'country',\n 'short_biography', 'long_biography', 'website', 'twitter', 'facebook', 'github', 'linkedin'\n ],\n 'sponsors': ['id', 'name', 'logo_url', 'level', 'type', 'url', 'description'],\n 'tracks': ['id', 'name', 'color', 'font_color'],\n 'session_types': ['id', 'name', 'length'],\n 'forms': []\n}\n\n# keep sync with storage.UPLOAD_PATHS\nDOWNLOAD_FIEDLS = {\n 'sessions': {\n 'video_url': ['video', '/videos/session_%d'],\n 'audio_url': ['audio', '/audios/session_%d'],\n 'slides_url': ['document', '/slides/session_%d']\n },\n 'speakers': {\n 'photo_url': ['image', '/images/speakers/%s_%d']\n },\n 'event': {\n 'logo_url': ['image', '/images/logo'],\n 'external_event_url': ['image', '/images/background']\n },\n 'sponsors': {\n 'logo_url': ['image', '/images/sponsors/%s_%d']\n }\n}\n\nDATE_FIELDS = ['starts_at', 'ends_at', 'created_at', 'deleted_at', 'submitted_at']\n\nEXPORTS = [\n ('event', Event),\n ('microlocations', Microlocation),\n ('sessions', Session),\n ('speakers', Speaker),\n ('sponsors', Sponsor),\n ('tracks', Track),\n ('session_types', SessionType),\n ('forms', CustomForms)\n]\n\n# strings to remove in a filename\nFILENAME_EXCLUDE = '<>:\"/\\|?*;'\n\n\n# FUNCTIONS\n\ndef sorted_dict(data):\n \"\"\"\n sorts a json (dict/list->dict) and returns OrderedDict\n \"\"\"\n if type(data) == OrderedDict:\n data = dict(data)\n if type(data) == dict:\n data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))\n elif type(data) == list:\n for count in range(len(data)):\n data[count] = OrderedDict(sorted(list(data[count].items()), key=lambda t: t[0]))\n return data\n\n\ndef _order_json(data, srv):\n \"\"\"\n sorts the data a/c FIELD_ORDER and returns.\n If some keys are not included in FIELD_ORDER, they go at last, sorted alphabetically\n \"\"\"\n new_data = OrderedDict()\n data.pop('_sa_instance_state', None)\n for field in 
FIELD_ORDER[srv[0]]:\n if field in DATE_FIELDS and data[field] and type(data[field]) != str:\n new_data[field] = sorted_dict(data[field].isoformat())\n elif field == 'font_color' and 'id' in new_data:\n track = db.session.query(Track).filter(Track.id == new_data['id']).first()\n new_data[field] = track.font_color\n else:\n new_data[field] = sorted_dict(data[field])\n data.pop(field, None)\n\n # remaining fields, sort and add\n # https://docs.python.org/2/library/collections.html#collections.OrderedDict\n data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))\n for key in data:\n if key in DATE_FIELDS and data[key] and type(data[key]) != str:\n new_data[key] = sorted_dict(data[key].isoformat())\n else:\n new_data[key] = sorted_dict(data[key])\n\n return new_data\n\n\ndef _download_media(data, srv, dir_path, settings):\n \"\"\"\n Downloads the media and saves it\n \"\"\"\n if srv not in DOWNLOAD_FIEDLS:\n return\n for i in DOWNLOAD_FIEDLS[srv]:\n if not data[i]:\n continue\n if not settings[DOWNLOAD_FIEDLS[srv][i][0]]:\n continue\n path = DOWNLOAD_FIEDLS[srv][i][1]\n if srv == 'speakers':\n path %= make_filename(data['name']), data['id']\n elif srv == 'sponsors':\n path %= make_filename(data['name']), data['id']\n elif srv != 'event':\n path = path % (data['id'])\n if data[i].find('.') > -1: # add extension\n ext = data[i].rsplit('.', 1)[1]\n if ext.find('/') == -1:\n path += '.' + ext\n full_path = dir_path + path\n # make dir\n cdir = full_path.rsplit('/', 1)[0]\n if not os.path.isdir(cdir):\n os.makedirs(cdir)\n # download and set\n url = data[i]\n if not is_downloadable(url):\n continue\n try:\n r = requests.get(url, allow_redirects=True)\n ext = get_filename_from_cd(r.headers.get('content-disposition'))[1]\n full_path += ext\n path += ext\n open(full_path, 'wb').write(r.content)\n data[i] = path\n except Exception:\n pass\n\n\ndef _generate_meta():\n \"\"\"\n Generate Meta information for export\n \"\"\"\n d = {'root_url': request.url_root}\n return d\n\n\ndef export_event_json(event_id, settings):\n \"\"\"\n Exports the event as a zip on the server and return its path\n \"\"\"\n # make directory\n exports_dir = app.config['BASE_DIR'] + '/static/uploads/exports/'\n if not os.path.isdir(exports_dir):\n os.makedirs(exports_dir)\n dir_path = exports_dir + 'event%d' % int(event_id)\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path, ignore_errors=True)\n os.makedirs(dir_path)\n # save to directory\n for e in EXPORTS:\n if e[0] == 'event':\n query_obj = db.session.query(e[1]).filter(\n e[1].id == event_id).first()\n data = _order_json(dict(query_obj.__dict__), e)\n _download_media(data, 'event', dir_path, settings)\n else:\n query_objs = db.session.query(e[1]).filter(\n e[1].event_id == event_id).all()\n data = [_order_json(dict(query_obj.__dict__), e) for query_obj in query_objs]\n for count in range(len(data)):\n data[count] = _order_json(data[count], e)\n _download_media(data[count], e[0], dir_path, settings)\n data_str = json.dumps(data, indent=4, ensure_ascii=False, default=handle_unserializable_data).encode('utf-8')\n fp = open(dir_path + '/' + e[0], 'w')\n fp.write(str(data_str, 'utf-8'))\n fp.close()\n # add meta\n data_str = json.dumps(\n _generate_meta(), sort_keys=True,\n indent=4, ensure_ascii=False\n ).encode('utf-8')\n fp = open(dir_path + '/meta', 'w')\n fp.write(str(data_str, 'utf-8'))\n fp.close()\n # make zip\n shutil.make_archive(dir_path, 'zip', dir_path)\n dir_path = dir_path + \".zip\"\n\n storage_path = UPLOAD_PATHS['exports']['zip'].format(\n 
event_id=event_id\n )\n uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])\n storage_url = upload(uploaded_file, storage_path)\n\n return storage_url\n\n\n# HELPERS\n\ndef create_export_job(task_id, event_id):\n \"\"\"\n Create export job for an export that is going to start\n \"\"\"\n export_job = ExportJob.query.filter_by(event_id=event_id).first()\n task_url = url_for('tasks.celery_task', task_id=task_id)\n if export_job:\n\n export_job.task = task_url\n export_job.user_email = current_user.email\n export_job.event = Event.query.get(event_id)\n export_job.starts_at = datetime.now(pytz.utc)\n else:\n export_job = ExportJob(\n task=task_url, user_email=current_identity.email,\n event=Event.query.get(event_id)\n )\n save_to_db(export_job, 'ExportJob saved')\n\n\n# FIELD DATA FORMATTERS\ndef make_filename(name):\n \"\"\"Make speaker image filename for export\"\"\"\n for _ in FILENAME_EXCLUDE:\n name = name.replace(_, ' ')\n return ''.join(s.title() for s in name.split() if s)\n\n\ndef handle_unserializable_data(obj):\n \"\"\"\n Handles objects which cannot be serialized by json.dumps()\n :param obj: Object to be serialized\n :return: JSON representation of the object\n \"\"\"\n if isinstance(obj, datetime):\n return obj.__str__()\n", "path": "app/api/helpers/export_helpers.py"}], "after_files": [{"content": "import json\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nimport pytz\nimport requests\nfrom flask import current_app as app\nfrom flask import request, url_for\nfrom flask_jwt import current_identity\nfrom flask_login import current_user\n\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.storage import upload, UPLOAD_PATHS, UploadedFile\nfrom app.api.helpers.utilities import is_downloadable, get_filename_from_cd\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.models.export_job import ExportJob\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.track import Track\n\n# order of keys in export json\nFIELD_ORDER = {\n 'event': [\n 'id', 'name', 'latitude', 'longitude', 'location_name', 'starts_at', 'ends_at',\n 'timezone', 'description', 'original_image_url', 'logo_url', 'organizer_name',\n 'organizer_description', 'external_event_url', 'ticket_url', 'privacy', 'event_type_id',\n 'event_topic_id', 'event_sub_topic_id', 'code_of_conduct'\n ],\n 'microlocations': ['id', 'name', 'floor'],\n 'sessions': [\n 'id', 'title', 'subtitle', 'short_abstract', 'long_abstract', 'starts_at', 'ends_at',\n 'session_type_id', 'track_id', 'comments', 'language', 'slides_url', 'audio_url', 'video_url'\n ],\n 'speakers': [\n 'id', 'name', 'email', 'mobile', 'photo_url', 'organisation', 'position', 'country',\n 'short_biography', 'long_biography', 'website', 'twitter', 'facebook', 'github', 'linkedin'\n ],\n 'sponsors': ['id', 'name', 'logo_url', 'level', 'type', 'url', 'description'],\n 'tracks': ['id', 'name', 'color', 'font_color'],\n 'session_types': ['id', 'name', 'length'],\n 'forms': []\n}\n\n# keep sync with storage.UPLOAD_PATHS\nDOWNLOAD_FIEDLS = {\n 'sessions': {\n 'video_url': ['video', '/videos/session_%d'],\n 'audio_url': ['audio', '/audios/session_%d'],\n 'slides_url': ['document', '/slides/session_%d']\n },\n 'speakers': {\n 'photo_url': ['image', 
'/images/speakers/%s_%d']\n },\n 'event': {\n 'logo_url': ['image', '/images/logo'],\n 'external_event_url': ['image', '/images/background']\n },\n 'sponsors': {\n 'logo_url': ['image', '/images/sponsors/%s_%d']\n }\n}\n\nDATE_FIELDS = ['starts_at', 'ends_at', 'created_at', 'deleted_at', 'submitted_at']\n\nEXPORTS = [\n ('event', Event),\n ('microlocations', Microlocation),\n ('sessions', Session),\n ('speakers', Speaker),\n ('sponsors', Sponsor),\n ('tracks', Track),\n ('session_types', SessionType),\n ('forms', CustomForms)\n]\n\n# strings to remove in a filename\nFILENAME_EXCLUDE = '<>:\"/\\|?*;'\n\n\n# FUNCTIONS\n\ndef sorted_dict(data):\n \"\"\"\n sorts a json (dict/list->dict) and returns OrderedDict\n \"\"\"\n if type(data) == OrderedDict:\n data = dict(data)\n if type(data) == dict:\n data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))\n elif type(data) == list:\n for count in range(len(data)):\n data[count] = OrderedDict(sorted(list(data[count].items()), key=lambda t: t[0]))\n return data\n\n\ndef _order_json(data, srv):\n \"\"\"\n sorts the data a/c FIELD_ORDER and returns.\n If some keys are not included in FIELD_ORDER, they go at last, sorted alphabetically\n \"\"\"\n new_data = OrderedDict()\n data.pop('_sa_instance_state', None)\n for field in FIELD_ORDER[srv[0]]:\n if field in DATE_FIELDS and data[field] and type(data[field]) != str:\n new_data[field] = sorted_dict(data[field].isoformat())\n elif field == 'font_color' and 'id' in new_data:\n track = db.session.query(Track).filter(Track.id == new_data['id']).first()\n new_data[field] = track.font_color\n else:\n new_data[field] = sorted_dict(data[field])\n data.pop(field, None)\n\n # remaining fields, sort and add\n # https://docs.python.org/2/library/collections.html#collections.OrderedDict\n data = OrderedDict(sorted(list(data.items()), key=lambda t: t[0]))\n for key in data:\n if key in DATE_FIELDS and data[key] and type(data[key]) != str:\n new_data[key] = sorted_dict(data[key].isoformat())\n else:\n new_data[key] = sorted_dict(data[key])\n\n return new_data\n\n\ndef _download_media(data, srv, dir_path, settings):\n \"\"\"\n Downloads the media and saves it\n \"\"\"\n if srv not in DOWNLOAD_FIEDLS:\n return\n for i in DOWNLOAD_FIEDLS[srv]:\n if not data[i]:\n continue\n if not settings[DOWNLOAD_FIEDLS[srv][i][0]]:\n continue\n path = DOWNLOAD_FIEDLS[srv][i][1]\n if srv == 'speakers':\n path %= make_filename(data['name']), data['id']\n elif srv == 'sponsors':\n path %= make_filename(data['name']), data['id']\n elif srv != 'event':\n path = path % (data['id'])\n if data[i].find('.') > -1: # add extension\n ext = data[i].rsplit('.', 1)[1]\n if ext.find('/') == -1:\n path += '.' 
+ ext\n full_path = dir_path + path\n # make dir\n cdir = full_path.rsplit('/', 1)[0]\n if not os.path.isdir(cdir):\n os.makedirs(cdir)\n # download and set\n url = data[i]\n if not is_downloadable(url):\n continue\n try:\n r = requests.get(url, allow_redirects=True)\n ext = get_filename_from_cd(r.headers.get('content-disposition'))[1]\n full_path += ext\n path += ext\n open(full_path, 'wb').write(r.content)\n data[i] = path\n except Exception:\n pass\n\n\ndef _generate_meta():\n \"\"\"\n Generate Meta information for export\n \"\"\"\n d = {'root_url': request.url_root}\n return d\n\n\ndef export_event_json(event_id, settings):\n \"\"\"\n Exports the event as a zip on the server and return its path\n \"\"\"\n # make directory\n exports_dir = app.config['BASE_DIR'] + '/static/uploads/exports/'\n if not os.path.isdir(exports_dir):\n os.makedirs(exports_dir)\n dir_path = exports_dir + 'event%d' % int(event_id)\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path, ignore_errors=True)\n os.makedirs(dir_path)\n # save to directory\n for e in EXPORTS:\n if e[0] == 'event':\n query_obj = db.session.query(e[1]).filter(\n e[1].id == event_id).first()\n data = _order_json(dict(query_obj.__dict__), e)\n _download_media(data, 'event', dir_path, settings)\n else:\n query_objs = db.session.query(e[1]).filter(\n e[1].event_id == event_id).all()\n data = [_order_json(dict(query_obj.__dict__), e) for query_obj in query_objs]\n for count in range(len(data)):\n data[count] = _order_json(data[count], e)\n _download_media(data[count], e[0], dir_path, settings)\n data_str = json.dumps(data, indent=4, ensure_ascii=False, default=handle_unserializable_data).encode('utf-8')\n fp = open(dir_path + '/' + e[0], 'w')\n fp.write(str(data_str, 'utf-8'))\n fp.close()\n # add meta\n data_str = json.dumps(\n _generate_meta(), sort_keys=True,\n indent=4, ensure_ascii=False\n ).encode('utf-8')\n fp = open(dir_path + '/meta', 'w')\n fp.write(str(data_str, 'utf-8'))\n fp.close()\n # make zip\n shutil.make_archive(dir_path, 'zip', dir_path)\n dir_path = dir_path + \".zip\"\n\n storage_path = UPLOAD_PATHS['exports']['zip'].format(\n event_id=event_id\n )\n uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])\n storage_url = upload(uploaded_file, storage_path)\n\n return storage_url\n\n\ndef get_current_user():\n if current_identity:\n return current_identity\n else:\n return current_user\n\n\n# HELPERS\n\ndef create_export_job(task_id, event_id):\n \"\"\"\n Create export job for an export that is going to start\n \"\"\"\n export_job = ExportJob.query.filter_by(event_id=event_id).first()\n task_url = url_for('tasks.celery_task', task_id=task_id)\n current_logged_user = get_current_user()\n\n if export_job:\n\n export_job.task = task_url\n export_job.user_email = current_logged_user.email\n export_job.event = Event.query.get(event_id)\n export_job.starts_at = datetime.now(pytz.utc)\n else:\n export_job = ExportJob(\n task=task_url, user_email=current_logged_user.email,\n event=Event.query.get(event_id)\n )\n save_to_db(export_job, 'ExportJob saved')\n\n\n# FIELD DATA FORMATTERS\ndef make_filename(name):\n \"\"\"Make speaker image filename for export\"\"\"\n for _ in FILENAME_EXCLUDE:\n name = name.replace(_, ' ')\n return ''.join(s.title() for s in name.split() if s)\n\n\ndef handle_unserializable_data(obj):\n \"\"\"\n Handles objects which cannot be serialized by json.dumps()\n :param obj: Object to be serialized\n :return: JSON representation of the object\n \"\"\"\n if isinstance(obj, datetime):\n return 
obj.__str__()\n", "path": "app/api/helpers/export_helpers.py"}]}
| 3,748 | 288 |
gh_patches_debug_15994
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WebDAV methods not supported
Falcon defines its supported HTTP methods in `falcon/constants.py`: the "usual" `HTTP_METHODS` are supported and, in addition to those, `WEBDAV_METHODS`. However, only the WebDAV versioning-extension methods from RFC 3253 are listed, not the "ordinary" WebDAV methods (i.e., those from RFCs 2518 and 4918) such as `COPY`, `LOCK`, `MKCOL`, and `MOVE`.
Supporting only an extension, but not the core upon which that extension builds, looks somewhat inconsistent.
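
For illustration only (this note is editorial, not part of the original report): the constants module quoted below already exposes a `FALCON_CUSTOM_HTTP_METHODS` environment hook, so a hypothetical stopgap while the RFC 4918 verbs are missing could look like the sketch below. The variable is read at import time, so it has to be set before `falcon` is imported.

```python
import os

# Hypothetical stopgap sketch: inject the missing RFC 4918 verbs through the
# FALCON_CUSTOM_HTTP_METHODS hook that falcon/constants.py already provides.
# The variable is read when the module is imported, so set it first.
os.environ["FALCON_CUSTOM_HTTP_METHODS"] = (
    "COPY,LOCK,MKCOL,MOVE,PROPFIND,PROPPATCH,UNLOCK"
)

from falcon.constants import COMBINED_METHODS

assert "MKCOL" in COMBINED_METHODS  # the extra verbs are now recognized
```

Baking the RFC 4918 set directly into `WEBDAV_METHODS`, as the report suggests, would avoid that per-deployment configuration.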
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/constants.py`
Content:
```
1 import os
2
3 # RFC 7231, 5789 methods
4 HTTP_METHODS = [
5 'CONNECT',
6 'DELETE',
7 'GET',
8 'HEAD',
9 'OPTIONS',
10 'PATCH',
11 'POST',
12 'PUT',
13 'TRACE',
14 ]
15
16 # RFC 3253 methods
17 WEBDAV_METHODS = [
18 'CHECKIN',
19 'CHECKOUT',
20 'REPORT',
21 'UNCHECKIN',
22 'UPDATE',
23 'VERSION-CONTROL',
24 ]
25
26 # if FALCON_CUSTOM_HTTP_METHODS is defined, treat it as a comma-
27 # delimited string of additional supported methods in this env.
28 FALCON_CUSTOM_HTTP_METHODS = [
29 method.strip().upper()
30 for method in os.environ.get('FALCON_CUSTOM_HTTP_METHODS', '').split(',')
31 if method.strip() != ''
32 ]
33
34 COMBINED_METHODS = HTTP_METHODS + WEBDAV_METHODS + FALCON_CUSTOM_HTTP_METHODS
35
36 # NOTE(kgriffs): According to RFC 7159, most JSON parsers assume
37 # UTF-8 and so it is the recommended default charset going forward,
38 # and indeed, other charsets should not be specified to ensure
39 # maximum interoperability.
40 MEDIA_JSON = 'application/json'
41
42 # NOTE(kgriffs): An internet media type for MessagePack has not
43 # yet been registered. 'application/x-msgpack' is commonly used,
44 # but the use of the 'x-' prefix is discouraged by RFC 6838.
45 MEDIA_MSGPACK = 'application/msgpack'
46
47 # NOTE(kgriffs): An internet media type for YAML has not been
48 # registered. RoR uses 'application/x-yaml', but since use of
49 # 'x-' is discouraged by RFC 6838, we don't use it in Falcon.
50 #
51 # The YAML specification requires that parsers deduce the character
52 # encoding by examining the first few bytes of the document itself.
53 # Therefore, it does not make sense to include the charset in the
54 # media type string.
55 MEDIA_YAML = 'application/yaml'
56
57 # NOTE(kgriffs): According to RFC 7303, when the charset is
58 # omitted, preference is given to the encoding specified in the
59 # document itself (either via a BOM, or via the XML declaration). If
60 # the document does not explicitly specify the encoding, UTF-8 is
61 # assumed. We do not specify the charset here, because many parsers
62 # ignore it anyway and just use what is specified in the document,
63 # contrary to the RFCs.
64 MEDIA_XML = 'application/xml'
65
66
67 # NOTE(kgriffs): RFC 4329 recommends application/* over text/.
68 # futhermore, parsers are required to respect the Unicode
69 # encoding signature, if present in the document, and to default
70 # to UTF-8 when not present. Note, however, that implementations
71 # are not required to support anything besides UTF-8, so it is
72 # unclear how much utility an encoding signature (or the charset
73 # parameter for that matter) has in practice.
74 MEDIA_JS = 'application/javascript'
75
76 # NOTE(kgriffs): According to RFC 6838, most text media types should
77 # include the charset parameter.
78 MEDIA_HTML = 'text/html; charset=utf-8'
79 MEDIA_TEXT = 'text/plain; charset=utf-8'
80
81 MEDIA_JPEG = 'image/jpeg'
82 MEDIA_PNG = 'image/png'
83 MEDIA_GIF = 'image/gif'
84
85 DEFAULT_MEDIA_TYPE = MEDIA_JSON
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/constants.py b/falcon/constants.py
--- a/falcon/constants.py
+++ b/falcon/constants.py
@@ -13,12 +13,19 @@
'TRACE',
]
-# RFC 3253 methods
+# RFC 2518 and 4918 methods
WEBDAV_METHODS = [
'CHECKIN',
'CHECKOUT',
+ 'COPY',
+ 'LOCK',
+ 'MKCOL',
+ 'MOVE',
+ 'PROPFIND',
+ 'PROPPATCH',
'REPORT',
'UNCHECKIN',
+ 'UNLOCK'
'UPDATE',
'VERSION-CONTROL',
]
@@ -63,7 +70,6 @@
# contrary to the RFCs.
MEDIA_XML = 'application/xml'
-
# NOTE(kgriffs): RFC 4329 recommends application/* over text/.
# futhermore, parsers are required to respect the Unicode
# encoding signature, if present in the document, and to default
|
{"golden_diff": "diff --git a/falcon/constants.py b/falcon/constants.py\n--- a/falcon/constants.py\n+++ b/falcon/constants.py\n@@ -13,12 +13,19 @@\n 'TRACE',\n ]\n \n-# RFC 3253 methods\n+# RFC 2518 and 4918 methods\n WEBDAV_METHODS = [\n 'CHECKIN',\n 'CHECKOUT',\n+ 'COPY',\n+ 'LOCK',\n+ 'MKCOL',\n+ 'MOVE',\n+ 'PROPFIND',\n+ 'PROPPATCH',\n 'REPORT',\n 'UNCHECKIN',\n+ 'UNLOCK'\n 'UPDATE',\n 'VERSION-CONTROL',\n ]\n@@ -63,7 +70,6 @@\n # contrary to the RFCs.\n MEDIA_XML = 'application/xml'\n \n-\n # NOTE(kgriffs): RFC 4329 recommends application/* over text/.\n # futhermore, parsers are required to respect the Unicode\n # encoding signature, if present in the document, and to default\n", "issue": "WebDAV methods not supported\nFalcon defines supported HTTP methods in `falcon/constants.py`: supported are \"usual\" `HTTP_METHODS` and, in addition to that, `WEBDAV_METHODS`. However, only WebDAV versioning extension methods from RFC 3253 are supported, but not the \"ordinary\" WebDAV ones (i.e. from RFCs 2518 & 4918) like `COPY`, `LOCK`, `MKCOL`, `MOVE` etc.\r\n\r\nSupporting only an extension, but not the core upon which that extension builds looks somewhat inconsistent.\n", "before_files": [{"content": "import os\n\n# RFC 7231, 5789 methods\nHTTP_METHODS = [\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n]\n\n# RFC 3253 methods\nWEBDAV_METHODS = [\n 'CHECKIN',\n 'CHECKOUT',\n 'REPORT',\n 'UNCHECKIN',\n 'UPDATE',\n 'VERSION-CONTROL',\n]\n\n# if FALCON_CUSTOM_HTTP_METHODS is defined, treat it as a comma-\n# delimited string of additional supported methods in this env.\nFALCON_CUSTOM_HTTP_METHODS = [\n method.strip().upper()\n for method in os.environ.get('FALCON_CUSTOM_HTTP_METHODS', '').split(',')\n if method.strip() != ''\n]\n\nCOMBINED_METHODS = HTTP_METHODS + WEBDAV_METHODS + FALCON_CUSTOM_HTTP_METHODS\n\n# NOTE(kgriffs): According to RFC 7159, most JSON parsers assume\n# UTF-8 and so it is the recommended default charset going forward,\n# and indeed, other charsets should not be specified to ensure\n# maximum interoperability.\nMEDIA_JSON = 'application/json'\n\n# NOTE(kgriffs): An internet media type for MessagePack has not\n# yet been registered. 'application/x-msgpack' is commonly used,\n# but the use of the 'x-' prefix is discouraged by RFC 6838.\nMEDIA_MSGPACK = 'application/msgpack'\n\n# NOTE(kgriffs): An internet media type for YAML has not been\n# registered. RoR uses 'application/x-yaml', but since use of\n# 'x-' is discouraged by RFC 6838, we don't use it in Falcon.\n#\n# The YAML specification requires that parsers deduce the character\n# encoding by examining the first few bytes of the document itself.\n# Therefore, it does not make sense to include the charset in the\n# media type string.\nMEDIA_YAML = 'application/yaml'\n\n# NOTE(kgriffs): According to RFC 7303, when the charset is\n# omitted, preference is given to the encoding specified in the\n# document itself (either via a BOM, or via the XML declaration). If\n# the document does not explicitly specify the encoding, UTF-8 is\n# assumed. We do not specify the charset here, because many parsers\n# ignore it anyway and just use what is specified in the document,\n# contrary to the RFCs.\nMEDIA_XML = 'application/xml'\n\n\n# NOTE(kgriffs): RFC 4329 recommends application/* over text/.\n# futhermore, parsers are required to respect the Unicode\n# encoding signature, if present in the document, and to default\n# to UTF-8 when not present. 
Note, however, that implementations\n# are not required to support anything besides UTF-8, so it is\n# unclear how much utility an encoding signature (or the charset\n# parameter for that matter) has in practice.\nMEDIA_JS = 'application/javascript'\n\n# NOTE(kgriffs): According to RFC 6838, most text media types should\n# include the charset parameter.\nMEDIA_HTML = 'text/html; charset=utf-8'\nMEDIA_TEXT = 'text/plain; charset=utf-8'\n\nMEDIA_JPEG = 'image/jpeg'\nMEDIA_PNG = 'image/png'\nMEDIA_GIF = 'image/gif'\n\nDEFAULT_MEDIA_TYPE = MEDIA_JSON\n", "path": "falcon/constants.py"}], "after_files": [{"content": "import os\n\n# RFC 7231, 5789 methods\nHTTP_METHODS = [\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n]\n\n# RFC 2518 and 4918 methods\nWEBDAV_METHODS = [\n 'CHECKIN',\n 'CHECKOUT',\n 'COPY',\n 'LOCK',\n 'MKCOL',\n 'MOVE',\n 'PROPFIND',\n 'PROPPATCH',\n 'REPORT',\n 'UNCHECKIN',\n 'UNLOCK'\n 'UPDATE',\n 'VERSION-CONTROL',\n]\n\n# if FALCON_CUSTOM_HTTP_METHODS is defined, treat it as a comma-\n# delimited string of additional supported methods in this env.\nFALCON_CUSTOM_HTTP_METHODS = [\n method.strip().upper()\n for method in os.environ.get('FALCON_CUSTOM_HTTP_METHODS', '').split(',')\n if method.strip() != ''\n]\n\nCOMBINED_METHODS = HTTP_METHODS + WEBDAV_METHODS + FALCON_CUSTOM_HTTP_METHODS\n\n# NOTE(kgriffs): According to RFC 7159, most JSON parsers assume\n# UTF-8 and so it is the recommended default charset going forward,\n# and indeed, other charsets should not be specified to ensure\n# maximum interoperability.\nMEDIA_JSON = 'application/json'\n\n# NOTE(kgriffs): An internet media type for MessagePack has not\n# yet been registered. 'application/x-msgpack' is commonly used,\n# but the use of the 'x-' prefix is discouraged by RFC 6838.\nMEDIA_MSGPACK = 'application/msgpack'\n\n# NOTE(kgriffs): An internet media type for YAML has not been\n# registered. RoR uses 'application/x-yaml', but since use of\n# 'x-' is discouraged by RFC 6838, we don't use it in Falcon.\n#\n# The YAML specification requires that parsers deduce the character\n# encoding by examining the first few bytes of the document itself.\n# Therefore, it does not make sense to include the charset in the\n# media type string.\nMEDIA_YAML = 'application/yaml'\n\n# NOTE(kgriffs): According to RFC 7303, when the charset is\n# omitted, preference is given to the encoding specified in the\n# document itself (either via a BOM, or via the XML declaration). If\n# the document does not explicitly specify the encoding, UTF-8 is\n# assumed. We do not specify the charset here, because many parsers\n# ignore it anyway and just use what is specified in the document,\n# contrary to the RFCs.\nMEDIA_XML = 'application/xml'\n\n# NOTE(kgriffs): RFC 4329 recommends application/* over text/.\n# futhermore, parsers are required to respect the Unicode\n# encoding signature, if present in the document, and to default\n# to UTF-8 when not present. 
Note, however, that implementations\n# are not required to support anything besides UTF-8, so it is\n# unclear how much utility an encoding signature (or the charset\n# parameter for that matter) has in practice.\nMEDIA_JS = 'application/javascript'\n\n# NOTE(kgriffs): According to RFC 6838, most text media types should\n# include the charset parameter.\nMEDIA_HTML = 'text/html; charset=utf-8'\nMEDIA_TEXT = 'text/plain; charset=utf-8'\n\nMEDIA_JPEG = 'image/jpeg'\nMEDIA_PNG = 'image/png'\nMEDIA_GIF = 'image/gif'\n\nDEFAULT_MEDIA_TYPE = MEDIA_JSON\n", "path": "falcon/constants.py"}]}
| 1,294 | 226 |
gh_patches_debug_27755
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-13075
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
chore(deps): bump grpcio-status from 1.51.1 to 1.51.3
Bumps [grpcio-status](https://grpc.io) from 1.51.1 to 1.51.3.
[Dependabot compatibility score](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/macaroons/caveats/_core.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12 from __future__ import annotations
13
14 import dataclasses
15 import json
16 import typing
17
18 from collections.abc import Callable, Mapping, Sequence
19 from dataclasses import dataclass
20 from typing import Any, ClassVar, TypeVar
21
22 from pydantic import ValidationError
23 from pydantic.dataclasses import dataclass as pydantic_dataclass
24 from pyramid.request import Request
25
26 from warehouse.macaroons.caveats import _legacy
27
28 T = TypeVar("T")
29 S = TypeVar("S")
30
31
32 class CaveatError(Exception):
33 pass
34
35
36 class CaveatDeserializationError(CaveatError):
37 pass
38
39
40 @dataclass(frozen=True, slots=True)
41 class Success:
42 def __bool__(self):
43 return True
44
45
46 @dataclass(frozen=True, slots=True)
47 class Failure:
48 reason: str
49
50 def __bool__(self):
51 return False
52
53
54 Result = Success | Failure
55
56
57 @pydantic_dataclass(frozen=True)
58 class Caveat:
59 tag: ClassVar[int]
60
61 def verify(self, request: Request, context: Any, permission: str) -> Result:
62 raise NotImplementedError
63
64 def __serialize__(self) -> Sequence:
65 return (self.tag,) + dataclasses.astuple(self)
66
67 @classmethod
68 def __deserialize__(cls: type[S], data: Sequence) -> S:
69 kwargs = {}
70 for i, field in enumerate(dataclasses.fields(cls)):
71 if len(data) > i:
72 value = data[i]
73 elif field.default is not dataclasses.MISSING:
74 value = field.default
75 elif field.default_factory is not dataclasses.MISSING:
76 value = field.default_factory()
77 else:
78 raise CaveatDeserializationError("Not enough values")
79
80 kwargs[field.name] = value
81
82 try:
83 obj = cls(**kwargs)
84 except ValidationError:
85 raise CaveatDeserializationError("invalid values for fields")
86
87 return obj
88
89
90 class _CaveatRegistry:
91
92 _tags: dict[int, type[Caveat]]
93
94 def __init__(self, *args: Any, **kwargs: Any):
95 super().__init__(*args, **kwargs)
96 self._tags = {}
97
98 def add(self, tag: int, cls: type[Caveat]):
99 if tag in self._tags:
100 raise TypeError(
101 f"Cannot re-use tag: {tag}, already used by {self._tags[tag]}"
102 )
103
104 self._tags[tag] = cls
105 cls.tag = tag
106
107 def lookup(self, /, tag: int) -> type[Caveat] | None:
108 return self._tags.get(tag)
109
110
111 _caveat_registry = _CaveatRegistry()
112
113
114 def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:
115 def deco(cls: type[T]) -> type[T]:
116 _caveat_registry.add(tag, typing.cast(type[Caveat], cls))
117 return cls
118
119 return deco
120
121
122 def serialize(caveat: Caveat) -> bytes:
123 return json.dumps(
124 caveat.__serialize__(), sort_keys=True, separators=(",", ":")
125 ).encode("utf8")
126
127
128 def deserialize(data: bytes) -> Caveat:
129 loaded = json.loads(data)
130
131 # Our original caveats were implemented as a mapping with arbitrary keys,
132 # so if we've gotten one of our those, we'll attempt to adapt it to our
133 # new format.
134 if isinstance(loaded, Mapping):
135 loaded = _legacy.adapt(loaded)
136 if loaded is None:
137 raise CaveatDeserializationError("caveat must be an array")
138
139 if not isinstance(loaded, Sequence) or isinstance(loaded, str):
140 raise CaveatDeserializationError("caveat must be an array")
141
142 if not len(loaded):
143 raise CaveatDeserializationError("caveat array cannot be empty")
144
145 tag, *fields = loaded
146 cls = _caveat_registry.lookup(tag)
147
148 if cls is None:
149 raise CaveatDeserializationError(f"caveat has unknown tag: {tag}")
150
151 return cls.__deserialize__(fields)
152
```
Path: `warehouse/admin/views/checks.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther
14 from pyramid.view import view_config
15 from sqlalchemy.exc import NoResultFound
16
17 from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareCheckType
18 from warehouse.malware.tasks import backfill, remove_verdicts, run_scheduled_check
19
20 EVALUATION_RUN_SIZE = 10000
21
22
23 @view_config(
24 route_name="admin.checks.list",
25 renderer="admin/malware/checks/index.html",
26 permission="moderator",
27 request_method="GET",
28 uses_session=True,
29 )
30 def get_checks(request):
31 all_checks = request.db.query(MalwareCheck)
32 active_checks = []
33 for check in all_checks:
34 if not check.is_stale:
35 active_checks.append(check)
36
37 active_checks.sort(key=lambda check: check.created, reverse=True)
38
39 return {"checks": active_checks}
40
41
42 @view_config(
43 route_name="admin.checks.detail",
44 renderer="admin/malware/checks/detail.html",
45 permission="moderator",
46 request_method="GET",
47 uses_session=True,
48 )
49 def get_check(request):
50 check = get_check_by_name(request.db, request.matchdict["check_name"])
51
52 all_checks = (
53 request.db.query(MalwareCheck)
54 .filter(MalwareCheck.name == request.matchdict["check_name"])
55 .order_by(MalwareCheck.version.desc())
56 .all()
57 )
58
59 return {
60 "check": check,
61 "checks": all_checks,
62 "states": MalwareCheckState,
63 "evaluation_run_size": EVALUATION_RUN_SIZE,
64 }
65
66
67 @view_config(
68 route_name="admin.checks.run_evaluation",
69 permission="admin",
70 request_method="POST",
71 uses_session=True,
72 require_methods=False,
73 require_csrf=True,
74 )
75 def run_evaluation(request):
76 check = get_check_by_name(request.db, request.matchdict["check_name"])
77
78 if check.state not in (MalwareCheckState.Enabled, MalwareCheckState.Evaluation):
79 request.session.flash(
80 "Check must be in 'enabled' or 'evaluation' state to manually execute.",
81 queue="error",
82 )
83 return HTTPSeeOther(
84 request.route_path("admin.checks.detail", check_name=check.name)
85 )
86
87 if check.check_type == MalwareCheckType.EventHook:
88 request.session.flash(
89 f"Running {check.name} on {EVALUATION_RUN_SIZE} {check.hooked_object.value}s\
90 !",
91 queue="success",
92 )
93 request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE)
94
95 else:
96 request.session.flash(f"Running {check.name} now!", queue="success")
97 request.task(run_scheduled_check).delay(check.name, manually_triggered=True)
98
99 return HTTPSeeOther(
100 request.route_path("admin.checks.detail", check_name=check.name)
101 )
102
103
104 @view_config(
105 route_name="admin.checks.change_state",
106 permission="admin",
107 request_method="POST",
108 uses_session=True,
109 require_methods=False,
110 require_csrf=True,
111 )
112 def change_check_state(request):
113 check = get_check_by_name(request.db, request.matchdict["check_name"])
114
115 try:
116 check_state = request.POST["check_state"]
117 except KeyError:
118 raise HTTPNotFound
119
120 try:
121 check.state = MalwareCheckState(check_state)
122 except ValueError:
123 request.session.flash("Invalid check state provided.", queue="error")
124 else:
125 if check.state == MalwareCheckState.WipedOut:
126 request.task(remove_verdicts).delay(check.name)
127 request.session.flash(
128 f"Changed {check.name!r} check to {check.state.value!r}!", queue="success"
129 )
130 finally:
131 return HTTPSeeOther(
132 request.route_path("admin.checks.detail", check_name=check.name)
133 )
134
135
136 def get_check_by_name(db, check_name):
137 try:
138 # Throw an exception if and only if no results are returned.
139 newest = (
140 db.query(MalwareCheck)
141 .filter(MalwareCheck.name == check_name)
142 .order_by(MalwareCheck.version.desc())
143 .limit(1)
144 .one()
145 )
146 except NoResultFound:
147 raise HTTPNotFound
148
149 return newest
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/admin/views/checks.py b/warehouse/admin/views/checks.py
--- a/warehouse/admin/views/checks.py
+++ b/warehouse/admin/views/checks.py
@@ -86,8 +86,8 @@
if check.check_type == MalwareCheckType.EventHook:
request.session.flash(
- f"Running {check.name} on {EVALUATION_RUN_SIZE} {check.hooked_object.value}s\
-!",
+ f"Running {check.name} on {EVALUATION_RUN_SIZE} "
+ f"{check.hooked_object.value}s!",
queue="success",
)
request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE)
diff --git a/warehouse/macaroons/caveats/_core.py b/warehouse/macaroons/caveats/_core.py
--- a/warehouse/macaroons/caveats/_core.py
+++ b/warehouse/macaroons/caveats/_core.py
@@ -15,7 +15,7 @@
import json
import typing
-from collections.abc import Callable, Mapping, Sequence
+from collections.abc import Mapping, Sequence
from dataclasses import dataclass
from typing import Any, ClassVar, TypeVar
@@ -111,7 +111,13 @@
_caveat_registry = _CaveatRegistry()
-def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:
+# TODO: The return signature detected is `"Union[Type[Dataclass], DataclassProxy]"`,
+# but the expectation is `Type[Dataclass]`.
+# See https://github.com/pydantic/pydantic/issues/4498 but not exactly the same.
+# This might not be corrected in pydantic until 2.0.
+# Original signature with type hints:
+# def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:
+def as_caveat(*, tag: int):
def deco(cls: type[T]) -> type[T]:
_caveat_registry.add(tag, typing.cast(type[Caveat], cls))
return cls
|
{"golden_diff": "diff --git a/warehouse/admin/views/checks.py b/warehouse/admin/views/checks.py\n--- a/warehouse/admin/views/checks.py\n+++ b/warehouse/admin/views/checks.py\n@@ -86,8 +86,8 @@\n \n if check.check_type == MalwareCheckType.EventHook:\n request.session.flash(\n- f\"Running {check.name} on {EVALUATION_RUN_SIZE} {check.hooked_object.value}s\\\n-!\",\n+ f\"Running {check.name} on {EVALUATION_RUN_SIZE} \"\n+ f\"{check.hooked_object.value}s!\",\n queue=\"success\",\n )\n request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE)\ndiff --git a/warehouse/macaroons/caveats/_core.py b/warehouse/macaroons/caveats/_core.py\n--- a/warehouse/macaroons/caveats/_core.py\n+++ b/warehouse/macaroons/caveats/_core.py\n@@ -15,7 +15,7 @@\n import json\n import typing\n \n-from collections.abc import Callable, Mapping, Sequence\n+from collections.abc import Mapping, Sequence\n from dataclasses import dataclass\n from typing import Any, ClassVar, TypeVar\n \n@@ -111,7 +111,13 @@\n _caveat_registry = _CaveatRegistry()\n \n \n-def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:\n+# TODO: The return signature detected is `\"Union[Type[Dataclass], DataclassProxy]\"`,\n+# but the expectation is `Type[Dataclass]`.\n+# See https://github.com/pydantic/pydantic/issues/4498 but not exactly the same.\n+# This might not be corrected in pydantic until 2.0.\n+# Original signature with type hints:\n+# def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:\n+def as_caveat(*, tag: int):\n def deco(cls: type[T]) -> type[T]:\n _caveat_registry.add(tag, typing.cast(type[Caveat], cls))\n return cls\n", "issue": "chore(deps): bump grpcio-status from 1.51.1 to 1.51.3\nBumps [grpcio-status](https://grpc.io) from 1.51.1 to 1.51.3.\n\n\n[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nYou can trigger a rebase of this PR by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n<details>\n<summary>Dependabot commands and options</summary>\n<br />\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n\n\n</details>\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\n\nimport dataclasses\nimport json\nimport typing\n\nfrom collections.abc import Callable, Mapping, Sequence\nfrom dataclasses import dataclass\nfrom typing import Any, ClassVar, TypeVar\n\nfrom pydantic import ValidationError\nfrom pydantic.dataclasses import dataclass as pydantic_dataclass\nfrom pyramid.request import Request\n\nfrom warehouse.macaroons.caveats import _legacy\n\nT = TypeVar(\"T\")\nS = TypeVar(\"S\")\n\n\nclass CaveatError(Exception):\n pass\n\n\nclass CaveatDeserializationError(CaveatError):\n pass\n\n\n@dataclass(frozen=True, slots=True)\nclass Success:\n def __bool__(self):\n return True\n\n\n@dataclass(frozen=True, slots=True)\nclass Failure:\n reason: str\n\n def __bool__(self):\n return False\n\n\nResult = Success | Failure\n\n\n@pydantic_dataclass(frozen=True)\nclass Caveat:\n tag: ClassVar[int]\n\n def verify(self, request: Request, context: Any, permission: str) -> Result:\n raise NotImplementedError\n\n def __serialize__(self) -> Sequence:\n return (self.tag,) + dataclasses.astuple(self)\n\n @classmethod\n def __deserialize__(cls: type[S], data: Sequence) -> S:\n kwargs = {}\n for i, field in enumerate(dataclasses.fields(cls)):\n if len(data) > i:\n value = data[i]\n elif field.default is not dataclasses.MISSING:\n value = field.default\n elif field.default_factory is not dataclasses.MISSING:\n value = field.default_factory()\n else:\n raise CaveatDeserializationError(\"Not enough values\")\n\n kwargs[field.name] = value\n\n try:\n obj = cls(**kwargs)\n except ValidationError:\n raise CaveatDeserializationError(\"invalid values for fields\")\n\n return obj\n\n\nclass _CaveatRegistry:\n\n _tags: dict[int, type[Caveat]]\n\n def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._tags = {}\n\n def add(self, tag: int, cls: type[Caveat]):\n if tag in self._tags:\n raise TypeError(\n f\"Cannot re-use tag: {tag}, already used by {self._tags[tag]}\"\n )\n\n self._tags[tag] = cls\n cls.tag = tag\n\n def lookup(self, /, tag: int) -> type[Caveat] | None:\n return self._tags.get(tag)\n\n\n_caveat_registry = _CaveatRegistry()\n\n\ndef as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:\n def deco(cls: type[T]) -> type[T]:\n _caveat_registry.add(tag, typing.cast(type[Caveat], cls))\n return cls\n\n return deco\n\n\ndef serialize(caveat: 
Caveat) -> bytes:\n return json.dumps(\n caveat.__serialize__(), sort_keys=True, separators=(\",\", \":\")\n ).encode(\"utf8\")\n\n\ndef deserialize(data: bytes) -> Caveat:\n loaded = json.loads(data)\n\n # Our original caveats were implemented as a mapping with arbitrary keys,\n # so if we've gotten one of our those, we'll attempt to adapt it to our\n # new format.\n if isinstance(loaded, Mapping):\n loaded = _legacy.adapt(loaded)\n if loaded is None:\n raise CaveatDeserializationError(\"caveat must be an array\")\n\n if not isinstance(loaded, Sequence) or isinstance(loaded, str):\n raise CaveatDeserializationError(\"caveat must be an array\")\n\n if not len(loaded):\n raise CaveatDeserializationError(\"caveat array cannot be empty\")\n\n tag, *fields = loaded\n cls = _caveat_registry.lookup(tag)\n\n if cls is None:\n raise CaveatDeserializationError(f\"caveat has unknown tag: {tag}\")\n\n return cls.__deserialize__(fields)\n", "path": "warehouse/macaroons/caveats/_core.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther\nfrom pyramid.view import view_config\nfrom sqlalchemy.exc import NoResultFound\n\nfrom warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareCheckType\nfrom warehouse.malware.tasks import backfill, remove_verdicts, run_scheduled_check\n\nEVALUATION_RUN_SIZE = 10000\n\n\n@view_config(\n route_name=\"admin.checks.list\",\n renderer=\"admin/malware/checks/index.html\",\n permission=\"moderator\",\n request_method=\"GET\",\n uses_session=True,\n)\ndef get_checks(request):\n all_checks = request.db.query(MalwareCheck)\n active_checks = []\n for check in all_checks:\n if not check.is_stale:\n active_checks.append(check)\n\n active_checks.sort(key=lambda check: check.created, reverse=True)\n\n return {\"checks\": active_checks}\n\n\n@view_config(\n route_name=\"admin.checks.detail\",\n renderer=\"admin/malware/checks/detail.html\",\n permission=\"moderator\",\n request_method=\"GET\",\n uses_session=True,\n)\ndef get_check(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n all_checks = (\n request.db.query(MalwareCheck)\n .filter(MalwareCheck.name == request.matchdict[\"check_name\"])\n .order_by(MalwareCheck.version.desc())\n .all()\n )\n\n return {\n \"check\": check,\n \"checks\": all_checks,\n \"states\": MalwareCheckState,\n \"evaluation_run_size\": EVALUATION_RUN_SIZE,\n }\n\n\n@view_config(\n route_name=\"admin.checks.run_evaluation\",\n permission=\"admin\",\n request_method=\"POST\",\n uses_session=True,\n require_methods=False,\n require_csrf=True,\n)\ndef run_evaluation(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n if check.state not in (MalwareCheckState.Enabled, MalwareCheckState.Evaluation):\n request.session.flash(\n \"Check must be in 'enabled' or 'evaluation' state to manually execute.\",\n queue=\"error\",\n )\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", 
check_name=check.name)\n )\n\n if check.check_type == MalwareCheckType.EventHook:\n request.session.flash(\n f\"Running {check.name} on {EVALUATION_RUN_SIZE} {check.hooked_object.value}s\\\n!\",\n queue=\"success\",\n )\n request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE)\n\n else:\n request.session.flash(f\"Running {check.name} now!\", queue=\"success\")\n request.task(run_scheduled_check).delay(check.name, manually_triggered=True)\n\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", check_name=check.name)\n )\n\n\n@view_config(\n route_name=\"admin.checks.change_state\",\n permission=\"admin\",\n request_method=\"POST\",\n uses_session=True,\n require_methods=False,\n require_csrf=True,\n)\ndef change_check_state(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n try:\n check_state = request.POST[\"check_state\"]\n except KeyError:\n raise HTTPNotFound\n\n try:\n check.state = MalwareCheckState(check_state)\n except ValueError:\n request.session.flash(\"Invalid check state provided.\", queue=\"error\")\n else:\n if check.state == MalwareCheckState.WipedOut:\n request.task(remove_verdicts).delay(check.name)\n request.session.flash(\n f\"Changed {check.name!r} check to {check.state.value!r}!\", queue=\"success\"\n )\n finally:\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", check_name=check.name)\n )\n\n\ndef get_check_by_name(db, check_name):\n try:\n # Throw an exception if and only if no results are returned.\n newest = (\n db.query(MalwareCheck)\n .filter(MalwareCheck.name == check_name)\n .order_by(MalwareCheck.version.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound\n\n return newest\n", "path": "warehouse/admin/views/checks.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\n\nimport dataclasses\nimport json\nimport typing\n\nfrom collections.abc import Mapping, Sequence\nfrom dataclasses import dataclass\nfrom typing import Any, ClassVar, TypeVar\n\nfrom pydantic import ValidationError\nfrom pydantic.dataclasses import dataclass as pydantic_dataclass\nfrom pyramid.request import Request\n\nfrom warehouse.macaroons.caveats import _legacy\n\nT = TypeVar(\"T\")\nS = TypeVar(\"S\")\n\n\nclass CaveatError(Exception):\n pass\n\n\nclass CaveatDeserializationError(CaveatError):\n pass\n\n\n@dataclass(frozen=True, slots=True)\nclass Success:\n def __bool__(self):\n return True\n\n\n@dataclass(frozen=True, slots=True)\nclass Failure:\n reason: str\n\n def __bool__(self):\n return False\n\n\nResult = Success | Failure\n\n\n@pydantic_dataclass(frozen=True)\nclass Caveat:\n tag: ClassVar[int]\n\n def verify(self, request: Request, context: Any, permission: str) -> Result:\n raise NotImplementedError\n\n def __serialize__(self) -> Sequence:\n return (self.tag,) + dataclasses.astuple(self)\n\n @classmethod\n def __deserialize__(cls: type[S], data: Sequence) -> S:\n kwargs = {}\n for i, field in 
enumerate(dataclasses.fields(cls)):\n if len(data) > i:\n value = data[i]\n elif field.default is not dataclasses.MISSING:\n value = field.default\n elif field.default_factory is not dataclasses.MISSING:\n value = field.default_factory()\n else:\n raise CaveatDeserializationError(\"Not enough values\")\n\n kwargs[field.name] = value\n\n try:\n obj = cls(**kwargs)\n except ValidationError:\n raise CaveatDeserializationError(\"invalid values for fields\")\n\n return obj\n\n\nclass _CaveatRegistry:\n\n _tags: dict[int, type[Caveat]]\n\n def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._tags = {}\n\n def add(self, tag: int, cls: type[Caveat]):\n if tag in self._tags:\n raise TypeError(\n f\"Cannot re-use tag: {tag}, already used by {self._tags[tag]}\"\n )\n\n self._tags[tag] = cls\n cls.tag = tag\n\n def lookup(self, /, tag: int) -> type[Caveat] | None:\n return self._tags.get(tag)\n\n\n_caveat_registry = _CaveatRegistry()\n\n\n# TODO: The return signature detected is `\"Union[Type[Dataclass], DataclassProxy]\"`,\n# but the expectation is `Type[Dataclass]`.\n# See https://github.com/pydantic/pydantic/issues/4498 but not exactly the same.\n# This might not be corrected in pydantic until 2.0.\n# Original signature with type hints:\n# def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]:\ndef as_caveat(*, tag: int):\n def deco(cls: type[T]) -> type[T]:\n _caveat_registry.add(tag, typing.cast(type[Caveat], cls))\n return cls\n\n return deco\n\n\ndef serialize(caveat: Caveat) -> bytes:\n return json.dumps(\n caveat.__serialize__(), sort_keys=True, separators=(\",\", \":\")\n ).encode(\"utf8\")\n\n\ndef deserialize(data: bytes) -> Caveat:\n loaded = json.loads(data)\n\n # Our original caveats were implemented as a mapping with arbitrary keys,\n # so if we've gotten one of our those, we'll attempt to adapt it to our\n # new format.\n if isinstance(loaded, Mapping):\n loaded = _legacy.adapt(loaded)\n if loaded is None:\n raise CaveatDeserializationError(\"caveat must be an array\")\n\n if not isinstance(loaded, Sequence) or isinstance(loaded, str):\n raise CaveatDeserializationError(\"caveat must be an array\")\n\n if not len(loaded):\n raise CaveatDeserializationError(\"caveat array cannot be empty\")\n\n tag, *fields = loaded\n cls = _caveat_registry.lookup(tag)\n\n if cls is None:\n raise CaveatDeserializationError(f\"caveat has unknown tag: {tag}\")\n\n return cls.__deserialize__(fields)\n", "path": "warehouse/macaroons/caveats/_core.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther\nfrom pyramid.view import view_config\nfrom sqlalchemy.exc import NoResultFound\n\nfrom warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareCheckType\nfrom warehouse.malware.tasks import backfill, remove_verdicts, run_scheduled_check\n\nEVALUATION_RUN_SIZE = 10000\n\n\n@view_config(\n route_name=\"admin.checks.list\",\n renderer=\"admin/malware/checks/index.html\",\n 
permission=\"moderator\",\n request_method=\"GET\",\n uses_session=True,\n)\ndef get_checks(request):\n all_checks = request.db.query(MalwareCheck)\n active_checks = []\n for check in all_checks:\n if not check.is_stale:\n active_checks.append(check)\n\n active_checks.sort(key=lambda check: check.created, reverse=True)\n\n return {\"checks\": active_checks}\n\n\n@view_config(\n route_name=\"admin.checks.detail\",\n renderer=\"admin/malware/checks/detail.html\",\n permission=\"moderator\",\n request_method=\"GET\",\n uses_session=True,\n)\ndef get_check(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n all_checks = (\n request.db.query(MalwareCheck)\n .filter(MalwareCheck.name == request.matchdict[\"check_name\"])\n .order_by(MalwareCheck.version.desc())\n .all()\n )\n\n return {\n \"check\": check,\n \"checks\": all_checks,\n \"states\": MalwareCheckState,\n \"evaluation_run_size\": EVALUATION_RUN_SIZE,\n }\n\n\n@view_config(\n route_name=\"admin.checks.run_evaluation\",\n permission=\"admin\",\n request_method=\"POST\",\n uses_session=True,\n require_methods=False,\n require_csrf=True,\n)\ndef run_evaluation(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n if check.state not in (MalwareCheckState.Enabled, MalwareCheckState.Evaluation):\n request.session.flash(\n \"Check must be in 'enabled' or 'evaluation' state to manually execute.\",\n queue=\"error\",\n )\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", check_name=check.name)\n )\n\n if check.check_type == MalwareCheckType.EventHook:\n request.session.flash(\n f\"Running {check.name} on {EVALUATION_RUN_SIZE} \"\n f\"{check.hooked_object.value}s!\",\n queue=\"success\",\n )\n request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE)\n\n else:\n request.session.flash(f\"Running {check.name} now!\", queue=\"success\")\n request.task(run_scheduled_check).delay(check.name, manually_triggered=True)\n\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", check_name=check.name)\n )\n\n\n@view_config(\n route_name=\"admin.checks.change_state\",\n permission=\"admin\",\n request_method=\"POST\",\n uses_session=True,\n require_methods=False,\n require_csrf=True,\n)\ndef change_check_state(request):\n check = get_check_by_name(request.db, request.matchdict[\"check_name\"])\n\n try:\n check_state = request.POST[\"check_state\"]\n except KeyError:\n raise HTTPNotFound\n\n try:\n check.state = MalwareCheckState(check_state)\n except ValueError:\n request.session.flash(\"Invalid check state provided.\", queue=\"error\")\n else:\n if check.state == MalwareCheckState.WipedOut:\n request.task(remove_verdicts).delay(check.name)\n request.session.flash(\n f\"Changed {check.name!r} check to {check.state.value!r}!\", queue=\"success\"\n )\n finally:\n return HTTPSeeOther(\n request.route_path(\"admin.checks.detail\", check_name=check.name)\n )\n\n\ndef get_check_by_name(db, check_name):\n try:\n # Throw an exception if and only if no results are returned.\n newest = (\n db.query(MalwareCheck)\n .filter(MalwareCheck.name == check_name)\n .order_by(MalwareCheck.version.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound\n\n return newest\n", "path": "warehouse/admin/views/checks.py"}]}
| 3,518 | 462 |
gh_patches_debug_34904
|
rasdani/github-patches
|
git_diff
|
openshift__openshift-ansible-5352
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Health check callback plugin causes warning on unhashable value
#### Description
Traceback when there was a random failure in logging:
```
roles/openshift_health_checker/callback_plugins/zz_failure_summary.py", line 147, in deduplicate_failures
groups[group_key].append(failure)
TypeError: unhashable type: 'dict'
```
This is from the failure de-duplication code in the callback plugin. The result came back with a dict for a value and it won't hash.
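For clarity, here is a minimal sketch of the failure mode (hypothetical values, not taken from the run above): a tuple is hashable only if every element in it is hashable, so a dict value such as `msg` inside the group key raises the error as soon as the tuple is used as a dictionary key.
```
from collections import defaultdict

groups = defaultdict(list)
# 'msg' carries a dict here, as it does in the oc_configmap failure shown below
failure = {'host': 'node1', 'msg': {'returncode': 1, 'results': {}}}
group_key = tuple(sorted((k, v) for k, v in failure.items() if k != 'host'))
groups[group_key].append(failure)  # TypeError: unhashable type: 'dict'
```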
##### Version
master
##### Observed Results
```
$ ansible-playbook playbooks/byo/config.yml -vvv
...
TASK [openshift_logging_elasticsearch : Set ES configmap] *********************************************************************************************************************************************************
task path: /home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:147
Wednesday 06 September 2017 09:52:01 -0400 (0:00:00.031) 0:16:19.770 ***
Using module file /home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/lib_openshift/library/oc_configmap.py
<ec2-54-172-162-93.compute-1.amazonaws.com> (1, '\n{"msg": {"returncode": 1, "cmd": "/bin/oc create configmap logging-elasticsearch --from-file=elasticsearch.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml --from-file=logging.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml -n logging", "results": {}, "stderr": "Error from server: timeout\\n", "stdout": ""}, "failed": true, "invocation": {"module_args": {"from_file": {"elasticsearch.yml": "/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml", "logging.yml": "/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml"}, "name": "logging-elasticsearch", "from_literal": null, "namespace": "logging", "kubeconfig": "/etc/origin/master/admin.kubeconfig", "state": "present", "debug": false}}}\n', '')
fatal: [ec2-54-172-162-93.compute-1.amazonaws.com]: FAILED! => {
"changed": false,
"failed": true,
"invocation": {
"module_args": {
"debug": false,
"from_file": {
"elasticsearch.yml": "/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml",
"logging.yml": "/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml"
},
"from_literal": null,
"kubeconfig": "/etc/origin/master/admin.kubeconfig",
"name": "logging-elasticsearch",
"namespace": "logging",
"state": "present"
}
}
}
MSG:
{u'returncode': 1, u'cmd': u'/bin/oc create configmap logging-elasticsearch --from-file=elasticsearch.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml --from-file=logging.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml -n logging', u'results': {}, u'stderr': u'Error from server: timeout\n', u'stdout': u''}
...
An error happened while generating a summary of failures:
Traceback (most recent call last):
File "/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py", line 48, in v2_playbook_on_stats
self._display.display(failure_summary(self.__failures, self.__playbook_file))
File "/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py", line 74, in failure_summary
failures = deduplicate_failures(failures)
File "/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py", line 147, in deduplicate_failures
groups[group_key].append(failure)
TypeError: unhashable type: 'dict'
```
##### Additional Information
This isn't the cause of whatever failed; it's just adding to the confusion with a warning at the end.
I think it's the "msg" being returned in the task failure that it's trying to hash and failing. Callback plugin code needs to be more paranoid :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_health_checker/callback_plugins/zz_failure_summary.py`
Content:
```
1 """Ansible callback plugin to print a nicely formatted summary of failures.
2
3 The file / module name is prefixed with `zz_` to make this plugin be loaded last
4 by Ansible, thus making its output the last thing that users see.
5 """
6
7 from collections import defaultdict
8 import traceback
9
10 from ansible.plugins.callback import CallbackBase
11 from ansible import constants as C
12 from ansible.utils.color import stringc
13
14
15 FAILED_NO_MSG = u'Failed without returning a message.'
16
17
18 class CallbackModule(CallbackBase):
19 """This callback plugin stores task results and summarizes failures."""
20
21 CALLBACK_VERSION = 2.0
22 CALLBACK_TYPE = 'aggregate'
23 CALLBACK_NAME = 'failure_summary'
24 CALLBACK_NEEDS_WHITELIST = False
25
26 def __init__(self):
27 super(CallbackModule, self).__init__()
28 self.__failures = []
29 self.__playbook_file = ''
30
31 def v2_playbook_on_start(self, playbook):
32 super(CallbackModule, self).v2_playbook_on_start(playbook)
33 # pylint: disable=protected-access; Ansible gives us no public API to
34 # get the file name of the current playbook from a callback plugin.
35 self.__playbook_file = playbook._file_name
36
37 def v2_runner_on_failed(self, result, ignore_errors=False):
38 super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
39 if not ignore_errors:
40 self.__failures.append(result)
41
42 def v2_playbook_on_stats(self, stats):
43 super(CallbackModule, self).v2_playbook_on_stats(stats)
44 # pylint: disable=broad-except; capturing exceptions broadly is
45 # intentional, to isolate arbitrary failures in this callback plugin.
46 try:
47 if self.__failures:
48 self._display.display(failure_summary(self.__failures, self.__playbook_file))
49 except Exception:
50 msg = stringc(
51 u'An error happened while generating a summary of failures:\n'
52 u'{}'.format(traceback.format_exc()), C.COLOR_WARN)
53 self._display.v(msg)
54
55
56 def failure_summary(failures, playbook):
57 """Return a summary of failed tasks, including details on health checks."""
58 if not failures:
59 return u''
60
61 # NOTE: because we don't have access to task_vars from callback plugins, we
62 # store the playbook context in the task result when the
63 # openshift_health_check action plugin is used, and we use this context to
64 # customize the error message.
65 # pylint: disable=protected-access; Ansible gives us no sufficient public
66 # API on TaskResult objects.
67 context = next((
68 context for context in
69 (failure._result.get('playbook_context') for failure in failures)
70 if context
71 ), None)
72
73 failures = [failure_to_dict(failure) for failure in failures]
74 failures = deduplicate_failures(failures)
75
76 summary = [u'', u'', u'Failure summary:', u'']
77
78 width = len(str(len(failures)))
79 initial_indent_format = u' {{:>{width}}}. '.format(width=width)
80 initial_indent_len = len(initial_indent_format.format(0))
81 subsequent_indent = u' ' * initial_indent_len
82 subsequent_extra_indent = u' ' * (initial_indent_len + 10)
83
84 for i, failure in enumerate(failures, 1):
85 entries = format_failure(failure)
86 summary.append(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
87 for entry in entries[1:]:
88 entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent)
89 indented = u'{}{}'.format(subsequent_indent, entry)
90 summary.append(indented)
91
92 failed_checks = set()
93 for failure in failures:
94 failed_checks.update(name for name, message in failure['checks'])
95 if failed_checks:
96 summary.append(check_failure_footer(failed_checks, context, playbook))
97
98 return u'\n'.join(summary)
99
100
101 def failure_to_dict(failed_task_result):
102 """Extract information out of a failed TaskResult into a dict.
103
104 The intent is to transform a TaskResult object into something easier to
105 manipulate. TaskResult is ansible.executor.task_result.TaskResult.
106 """
107 # pylint: disable=protected-access; Ansible gives us no sufficient public
108 # API on TaskResult objects.
109 _result = failed_task_result._result
110 return {
111 'host': failed_task_result._host.get_name(),
112 'play': play_name(failed_task_result._task),
113 'task': failed_task_result.task_name,
114 'msg': _result.get('msg', FAILED_NO_MSG),
115 'checks': tuple(
116 (name, result.get('msg', FAILED_NO_MSG))
117 for name, result in sorted(_result.get('checks', {}).items())
118 if result.get('failed')
119 ),
120 }
121
122
123 def play_name(obj):
124 """Given a task or block, return the name of its parent play.
125
126 This is loosely inspired by ansible.playbook.base.Base.dump_me.
127 """
128 # pylint: disable=protected-access; Ansible gives us no sufficient public
129 # API to implement this.
130 if not obj:
131 return ''
132 if hasattr(obj, '_play'):
133 return obj._play.get_name()
134 return play_name(getattr(obj, '_parent'))
135
136
137 def deduplicate_failures(failures):
138 """Group together similar failures from different hosts.
139
140 Returns a new list of failures such that identical failures from different
141 hosts are grouped together in a single entry. The relative order of failures
142 is preserved.
143 """
144 groups = defaultdict(list)
145 for failure in failures:
146 group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
147 groups[group_key].append(failure)
148 result = []
149 for failure in failures:
150 group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
151 if group_key not in groups:
152 continue
153 failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key)))
154 result.append(failure)
155 return result
156
157
158 def format_failure(failure):
159 """Return a list of pretty-formatted text entries describing a failure, including
160 relevant information about it. Expect that the list of text entries will be joined
161 by a newline separator when output to the user."""
162 host = u', '.join(failure['host'])
163 play = failure['play']
164 task = failure['task']
165 msg = failure['msg']
166 checks = failure['checks']
167 fields = (
168 (u'Hosts', host),
169 (u'Play', play),
170 (u'Task', task),
171 (u'Message', stringc(msg, C.COLOR_ERROR)),
172 )
173 if checks:
174 fields += ((u'Details', format_failed_checks(checks)),)
175 row_format = '{:10}{}'
176 return [row_format.format(header + u':', body) for header, body in fields]
177
178
179 def format_failed_checks(checks):
180 """Return pretty-formatted text describing checks that failed."""
181 messages = []
182 for name, message in checks:
183 messages.append(u'check "{}":\n{}'.format(name, message))
184 return stringc(u'\n\n'.join(messages), C.COLOR_ERROR)
185
186
187 def check_failure_footer(failed_checks, context, playbook):
188 """Return a textual explanation about checks depending on context.
189
190 The purpose of specifying context is to vary the output depending on what
191 the user was expecting to happen (based on which playbook they ran). The
192 only use currently is to vary the message depending on whether the user was
193 deliberately running checks or was trying to install/upgrade and checks are
194 just included. Other use cases may arise.
195 """
196 checks = ','.join(sorted(failed_checks))
197 summary = [u'']
198 if context in ['pre-install', 'health', 'adhoc']:
199 # User was expecting to run checks, less explanation needed.
200 summary.extend([
201 u'You may configure or disable checks by setting Ansible '
202 u'variables. To disable those above, set:',
203 u' openshift_disable_check={checks}'.format(checks=checks),
204 u'Consult check documentation for configurable variables.',
205 ])
206 else:
207 # User may not be familiar with the checks, explain what checks are in
208 # the first place.
209 summary.extend([
210 u'The execution of "{playbook}" includes checks designed to fail '
211 u'early if the requirements of the playbook are not met. One or '
212 u'more of these checks failed. To disregard these results,'
213 u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),
214 u' openshift_disable_check={checks}'.format(checks=checks),
215 u'Failing check names are shown in the failure details above. '
216 u'Some checks may be configurable by variables if your requirements '
217 u'are different from the defaults; consult check documentation.',
218 ])
219 summary.append(
220 u'Variables can be set in the inventory or passed on the command line '
221 u'using the -e flag to ansible-playbook.'
222 )
223 return u'\n'.join(summary)
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+ If failures is unhashable, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+ # abort and return original list of failures when failures has an
+ # unhashable type.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
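As a quick illustration of the second hunk (a sketch for this write-up, not part of the PR's test suite, and assuming Ansible is installed so the bundled `six` import resolves), the patched host handling accepts either a plain host name or the tuple produced by deduplication:
```
from ansible.module_utils.six import string_types

def join_hosts(host):
    # mirrors the patched branch added to format_failure
    return host if isinstance(host, string_types) else u', '.join(host)

join_hosts('node1')             # -> 'node1'
join_hosts(('node1', 'node2'))  # -> 'node1, node2'
```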
|
{"golden_diff": "diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\n--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\n+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\n@@ -10,6 +10,7 @@\n from ansible.plugins.callback import CallbackBase\n from ansible import constants as C\n from ansible.utils.color import stringc\n+from ansible.module_utils.six import string_types\n \n \n FAILED_NO_MSG = u'Failed without returning a message.'\n@@ -140,11 +141,19 @@\n Returns a new list of failures such that identical failures from different\n hosts are grouped together in a single entry. The relative order of failures\n is preserved.\n+\n+ If failures is unhashable, the original list of failures is returned.\n \"\"\"\n groups = defaultdict(list)\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n- groups[group_key].append(failure)\n+ try:\n+ groups[group_key].append(failure)\n+ except TypeError:\n+ # abort and return original list of failures when failures has an\n+ # unhashable type.\n+ return failures\n+\n result = []\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n@@ -159,7 +168,10 @@\n \"\"\"Return a list of pretty-formatted text entries describing a failure, including\n relevant information about it. Expect that the list of text entries will be joined\n by a newline separator when output to the user.\"\"\"\n- host = u', '.join(failure['host'])\n+ if isinstance(failure['host'], string_types):\n+ host = failure['host']\n+ else:\n+ host = u', '.join(failure['host'])\n play = failure['play']\n task = failure['task']\n msg = failure['msg']\n", "issue": "Health check callback plugin causes warning on unhashable value\n#### Description\r\n\r\nTraceback when there was a random failure in logging:\r\n\r\n```\r\nroles/openshift_health_checker/callback_plugins/zz_failure_summary.py\", line 147, in deduplicate_failures\r\n groups[group_key].append(failure)\r\nTypeError: unhashable type: 'dict'\r\n```\r\n\r\nThis is from the failure de-duplication code in the callback plugin. 
The result came back with a dict for a value and it won't hash.\r\n\r\n##### Version\r\n\r\nmaster\r\n\r\n##### Observed Results\r\n\r\n```\r\n$ ansible-playbook playbooks/byo/config.yml -vvv\r\n...\r\nTASK [openshift_logging_elasticsearch : Set ES configmap] *********************************************************************************************************************************************************\r\ntask path: /home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:147\r\nWednesday 06 September 2017 09:52:01 -0400 (0:00:00.031) 0:16:19.770 *** \r\nUsing module file /home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/lib_openshift/library/oc_configmap.py\r\n<ec2-54-172-162-93.compute-1.amazonaws.com> (1, '\\n{\"msg\": {\"returncode\": 1, \"cmd\": \"/bin/oc create configmap logging-elasticsearch --from-file=elasticsearch.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml --from-file=logging.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml -n logging\", \"results\": {}, \"stderr\": \"Error from server: timeout\\\\n\", \"stdout\": \"\"}, \"failed\": true, \"invocation\": {\"module_args\": {\"from_file\": {\"elasticsearch.yml\": \"/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml\", \"logging.yml\": \"/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml\"}, \"name\": \"logging-elasticsearch\", \"from_literal\": null, \"namespace\": \"logging\", \"kubeconfig\": \"/etc/origin/master/admin.kubeconfig\", \"state\": \"present\", \"debug\": false}}}\\n', '')\r\nfatal: [ec2-54-172-162-93.compute-1.amazonaws.com]: FAILED! => {\r\n \"changed\": false, \r\n \"failed\": true, \r\n \"invocation\": {\r\n \"module_args\": {\r\n \"debug\": false, \r\n \"from_file\": {\r\n \"elasticsearch.yml\": \"/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml\", \r\n \"logging.yml\": \"/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml\"\r\n }, \r\n \"from_literal\": null, \r\n \"kubeconfig\": \"/etc/origin/master/admin.kubeconfig\", \r\n \"name\": \"logging-elasticsearch\", \r\n \"namespace\": \"logging\", \r\n \"state\": \"present\"\r\n }\r\n }\r\n}\r\n\r\nMSG:\r\n\r\n{u'returncode': 1, u'cmd': u'/bin/oc create configmap logging-elasticsearch --from-file=elasticsearch.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch.yml --from-file=logging.yml=/tmp/openshift-logging-ansible-C6rjd2/elasticsearch-logging.yml -n logging', u'results': {}, u'stderr': u'Error from server: timeout\\n', u'stdout': u''}\r\n\r\n...\r\nAn error happened while generating a summary of failures:\r\nTraceback (most recent call last):\r\n File \"/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\", line 48, in v2_playbook_on_stats\r\n self._display.display(failure_summary(self.__failures, self.__playbook_file))\r\n File \"/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\", line 74, in failure_summary\r\n failures = deduplicate_failures(failures)\r\n File \"/home/lmeyer/go/src/github.com/openshift/openshift-ansible/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py\", line 147, in deduplicate_failures\r\n groups[group_key].append(failure)\r\nTypeError: unhashable type: 'dict'\r\n\r\n```\r\n\r\n##### Additional Information\r\nThis isn't the cause of whatever failed, it's just adding to the confusion with a warning at the 
end.\r\n\r\nI think it's the \"msg\" being returned in the task failure that it's trying to hash and failing. Callback plugin code needs to be more paranoid :)\n", "before_files": [{"content": "\"\"\"Ansible callback plugin to print a nicely formatted summary of failures.\n\nThe file / module name is prefixed with `zz_` to make this plugin be loaded last\nby Ansible, thus making its output the last thing that users see.\n\"\"\"\n\nfrom collections import defaultdict\nimport traceback\n\nfrom ansible.plugins.callback import CallbackBase\nfrom ansible import constants as C\nfrom ansible.utils.color import stringc\n\n\nFAILED_NO_MSG = u'Failed without returning a message.'\n\n\nclass CallbackModule(CallbackBase):\n \"\"\"This callback plugin stores task results and summarizes failures.\"\"\"\n\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'aggregate'\n CALLBACK_NAME = 'failure_summary'\n CALLBACK_NEEDS_WHITELIST = False\n\n def __init__(self):\n super(CallbackModule, self).__init__()\n self.__failures = []\n self.__playbook_file = ''\n\n def v2_playbook_on_start(self, playbook):\n super(CallbackModule, self).v2_playbook_on_start(playbook)\n # pylint: disable=protected-access; Ansible gives us no public API to\n # get the file name of the current playbook from a callback plugin.\n self.__playbook_file = playbook._file_name\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)\n if not ignore_errors:\n self.__failures.append(result)\n\n def v2_playbook_on_stats(self, stats):\n super(CallbackModule, self).v2_playbook_on_stats(stats)\n # pylint: disable=broad-except; capturing exceptions broadly is\n # intentional, to isolate arbitrary failures in this callback plugin.\n try:\n if self.__failures:\n self._display.display(failure_summary(self.__failures, self.__playbook_file))\n except Exception:\n msg = stringc(\n u'An error happened while generating a summary of failures:\\n'\n u'{}'.format(traceback.format_exc()), C.COLOR_WARN)\n self._display.v(msg)\n\n\ndef failure_summary(failures, playbook):\n \"\"\"Return a summary of failed tasks, including details on health checks.\"\"\"\n if not failures:\n return u''\n\n # NOTE: because we don't have access to task_vars from callback plugins, we\n # store the playbook context in the task result when the\n # openshift_health_check action plugin is used, and we use this context to\n # customize the error message.\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API on TaskResult objects.\n context = next((\n context for context in\n (failure._result.get('playbook_context') for failure in failures)\n if context\n ), None)\n\n failures = [failure_to_dict(failure) for failure in failures]\n failures = deduplicate_failures(failures)\n\n summary = [u'', u'', u'Failure summary:', u'']\n\n width = len(str(len(failures)))\n initial_indent_format = u' {{:>{width}}}. 
'.format(width=width)\n initial_indent_len = len(initial_indent_format.format(0))\n subsequent_indent = u' ' * initial_indent_len\n subsequent_extra_indent = u' ' * (initial_indent_len + 10)\n\n for i, failure in enumerate(failures, 1):\n entries = format_failure(failure)\n summary.append(u'\\n{}{}'.format(initial_indent_format.format(i), entries[0]))\n for entry in entries[1:]:\n entry = entry.replace(u'\\n', u'\\n' + subsequent_extra_indent)\n indented = u'{}{}'.format(subsequent_indent, entry)\n summary.append(indented)\n\n failed_checks = set()\n for failure in failures:\n failed_checks.update(name for name, message in failure['checks'])\n if failed_checks:\n summary.append(check_failure_footer(failed_checks, context, playbook))\n\n return u'\\n'.join(summary)\n\n\ndef failure_to_dict(failed_task_result):\n \"\"\"Extract information out of a failed TaskResult into a dict.\n\n The intent is to transform a TaskResult object into something easier to\n manipulate. TaskResult is ansible.executor.task_result.TaskResult.\n \"\"\"\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API on TaskResult objects.\n _result = failed_task_result._result\n return {\n 'host': failed_task_result._host.get_name(),\n 'play': play_name(failed_task_result._task),\n 'task': failed_task_result.task_name,\n 'msg': _result.get('msg', FAILED_NO_MSG),\n 'checks': tuple(\n (name, result.get('msg', FAILED_NO_MSG))\n for name, result in sorted(_result.get('checks', {}).items())\n if result.get('failed')\n ),\n }\n\n\ndef play_name(obj):\n \"\"\"Given a task or block, return the name of its parent play.\n\n This is loosely inspired by ansible.playbook.base.Base.dump_me.\n \"\"\"\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API to implement this.\n if not obj:\n return ''\n if hasattr(obj, '_play'):\n return obj._play.get_name()\n return play_name(getattr(obj, '_parent'))\n\n\ndef deduplicate_failures(failures):\n \"\"\"Group together similar failures from different hosts.\n\n Returns a new list of failures such that identical failures from different\n hosts are grouped together in a single entry. The relative order of failures\n is preserved.\n \"\"\"\n groups = defaultdict(list)\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n groups[group_key].append(failure)\n result = []\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n if group_key not in groups:\n continue\n failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key)))\n result.append(failure)\n return result\n\n\ndef format_failure(failure):\n \"\"\"Return a list of pretty-formatted text entries describing a failure, including\n relevant information about it. 
Expect that the list of text entries will be joined\n by a newline separator when output to the user.\"\"\"\n host = u', '.join(failure['host'])\n play = failure['play']\n task = failure['task']\n msg = failure['msg']\n checks = failure['checks']\n fields = (\n (u'Hosts', host),\n (u'Play', play),\n (u'Task', task),\n (u'Message', stringc(msg, C.COLOR_ERROR)),\n )\n if checks:\n fields += ((u'Details', format_failed_checks(checks)),)\n row_format = '{:10}{}'\n return [row_format.format(header + u':', body) for header, body in fields]\n\n\ndef format_failed_checks(checks):\n \"\"\"Return pretty-formatted text describing checks that failed.\"\"\"\n messages = []\n for name, message in checks:\n messages.append(u'check \"{}\":\\n{}'.format(name, message))\n return stringc(u'\\n\\n'.join(messages), C.COLOR_ERROR)\n\n\ndef check_failure_footer(failed_checks, context, playbook):\n \"\"\"Return a textual explanation about checks depending on context.\n\n The purpose of specifying context is to vary the output depending on what\n the user was expecting to happen (based on which playbook they ran). The\n only use currently is to vary the message depending on whether the user was\n deliberately running checks or was trying to install/upgrade and checks are\n just included. Other use cases may arise.\n \"\"\"\n checks = ','.join(sorted(failed_checks))\n summary = [u'']\n if context in ['pre-install', 'health', 'adhoc']:\n # User was expecting to run checks, less explanation needed.\n summary.extend([\n u'You may configure or disable checks by setting Ansible '\n u'variables. To disable those above, set:',\n u' openshift_disable_check={checks}'.format(checks=checks),\n u'Consult check documentation for configurable variables.',\n ])\n else:\n # User may not be familiar with the checks, explain what checks are in\n # the first place.\n summary.extend([\n u'The execution of \"{playbook}\" includes checks designed to fail '\n u'early if the requirements of the playbook are not met. One or '\n u'more of these checks failed. To disregard these results,'\n u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),\n u' openshift_disable_check={checks}'.format(checks=checks),\n u'Failing check names are shown in the failure details above. 
'\n u'Some checks may be configurable by variables if your requirements '\n u'are different from the defaults; consult check documentation.',\n ])\n summary.append(\n u'Variables can be set in the inventory or passed on the command line '\n u'using the -e flag to ansible-playbook.'\n )\n return u'\\n'.join(summary)\n", "path": "roles/openshift_health_checker/callback_plugins/zz_failure_summary.py"}], "after_files": [{"content": "\"\"\"Ansible callback plugin to print a nicely formatted summary of failures.\n\nThe file / module name is prefixed with `zz_` to make this plugin be loaded last\nby Ansible, thus making its output the last thing that users see.\n\"\"\"\n\nfrom collections import defaultdict\nimport traceback\n\nfrom ansible.plugins.callback import CallbackBase\nfrom ansible import constants as C\nfrom ansible.utils.color import stringc\nfrom ansible.module_utils.six import string_types\n\n\nFAILED_NO_MSG = u'Failed without returning a message.'\n\n\nclass CallbackModule(CallbackBase):\n \"\"\"This callback plugin stores task results and summarizes failures.\"\"\"\n\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'aggregate'\n CALLBACK_NAME = 'failure_summary'\n CALLBACK_NEEDS_WHITELIST = False\n\n def __init__(self):\n super(CallbackModule, self).__init__()\n self.__failures = []\n self.__playbook_file = ''\n\n def v2_playbook_on_start(self, playbook):\n super(CallbackModule, self).v2_playbook_on_start(playbook)\n # pylint: disable=protected-access; Ansible gives us no public API to\n # get the file name of the current playbook from a callback plugin.\n self.__playbook_file = playbook._file_name\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)\n if not ignore_errors:\n self.__failures.append(result)\n\n def v2_playbook_on_stats(self, stats):\n super(CallbackModule, self).v2_playbook_on_stats(stats)\n # pylint: disable=broad-except; capturing exceptions broadly is\n # intentional, to isolate arbitrary failures in this callback plugin.\n try:\n if self.__failures:\n self._display.display(failure_summary(self.__failures, self.__playbook_file))\n except Exception:\n msg = stringc(\n u'An error happened while generating a summary of failures:\\n'\n u'{}'.format(traceback.format_exc()), C.COLOR_WARN)\n self._display.v(msg)\n\n\ndef failure_summary(failures, playbook):\n \"\"\"Return a summary of failed tasks, including details on health checks.\"\"\"\n if not failures:\n return u''\n\n # NOTE: because we don't have access to task_vars from callback plugins, we\n # store the playbook context in the task result when the\n # openshift_health_check action plugin is used, and we use this context to\n # customize the error message.\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API on TaskResult objects.\n context = next((\n context for context in\n (failure._result.get('playbook_context') for failure in failures)\n if context\n ), None)\n\n failures = [failure_to_dict(failure) for failure in failures]\n failures = deduplicate_failures(failures)\n\n summary = [u'', u'', u'Failure summary:', u'']\n\n width = len(str(len(failures)))\n initial_indent_format = u' {{:>{width}}}. 
'.format(width=width)\n initial_indent_len = len(initial_indent_format.format(0))\n subsequent_indent = u' ' * initial_indent_len\n subsequent_extra_indent = u' ' * (initial_indent_len + 10)\n\n for i, failure in enumerate(failures, 1):\n entries = format_failure(failure)\n summary.append(u'\\n{}{}'.format(initial_indent_format.format(i), entries[0]))\n for entry in entries[1:]:\n entry = entry.replace(u'\\n', u'\\n' + subsequent_extra_indent)\n indented = u'{}{}'.format(subsequent_indent, entry)\n summary.append(indented)\n\n failed_checks = set()\n for failure in failures:\n failed_checks.update(name for name, message in failure['checks'])\n if failed_checks:\n summary.append(check_failure_footer(failed_checks, context, playbook))\n\n return u'\\n'.join(summary)\n\n\ndef failure_to_dict(failed_task_result):\n \"\"\"Extract information out of a failed TaskResult into a dict.\n\n The intent is to transform a TaskResult object into something easier to\n manipulate. TaskResult is ansible.executor.task_result.TaskResult.\n \"\"\"\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API on TaskResult objects.\n _result = failed_task_result._result\n return {\n 'host': failed_task_result._host.get_name(),\n 'play': play_name(failed_task_result._task),\n 'task': failed_task_result.task_name,\n 'msg': _result.get('msg', FAILED_NO_MSG),\n 'checks': tuple(\n (name, result.get('msg', FAILED_NO_MSG))\n for name, result in sorted(_result.get('checks', {}).items())\n if result.get('failed')\n ),\n }\n\n\ndef play_name(obj):\n \"\"\"Given a task or block, return the name of its parent play.\n\n This is loosely inspired by ansible.playbook.base.Base.dump_me.\n \"\"\"\n # pylint: disable=protected-access; Ansible gives us no sufficient public\n # API to implement this.\n if not obj:\n return ''\n if hasattr(obj, '_play'):\n return obj._play.get_name()\n return play_name(getattr(obj, '_parent'))\n\n\ndef deduplicate_failures(failures):\n \"\"\"Group together similar failures from different hosts.\n\n Returns a new list of failures such that identical failures from different\n hosts are grouped together in a single entry. The relative order of failures\n is preserved.\n\n If failures is unhashable, the original list of failures is returned.\n \"\"\"\n groups = defaultdict(list)\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n try:\n groups[group_key].append(failure)\n except TypeError:\n # abort and return original list of failures when failures has an\n # unhashable type.\n return failures\n\n result = []\n for failure in failures:\n group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))\n if group_key not in groups:\n continue\n failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key)))\n result.append(failure)\n return result\n\n\ndef format_failure(failure):\n \"\"\"Return a list of pretty-formatted text entries describing a failure, including\n relevant information about it. 
Expect that the list of text entries will be joined\n by a newline separator when output to the user.\"\"\"\n if isinstance(failure['host'], string_types):\n host = failure['host']\n else:\n host = u', '.join(failure['host'])\n play = failure['play']\n task = failure['task']\n msg = failure['msg']\n checks = failure['checks']\n fields = (\n (u'Hosts', host),\n (u'Play', play),\n (u'Task', task),\n (u'Message', stringc(msg, C.COLOR_ERROR)),\n )\n if checks:\n fields += ((u'Details', format_failed_checks(checks)),)\n row_format = '{:10}{}'\n return [row_format.format(header + u':', body) for header, body in fields]\n\n\ndef format_failed_checks(checks):\n \"\"\"Return pretty-formatted text describing checks that failed.\"\"\"\n messages = []\n for name, message in checks:\n messages.append(u'check \"{}\":\\n{}'.format(name, message))\n return stringc(u'\\n\\n'.join(messages), C.COLOR_ERROR)\n\n\ndef check_failure_footer(failed_checks, context, playbook):\n \"\"\"Return a textual explanation about checks depending on context.\n\n The purpose of specifying context is to vary the output depending on what\n the user was expecting to happen (based on which playbook they ran). The\n only use currently is to vary the message depending on whether the user was\n deliberately running checks or was trying to install/upgrade and checks are\n just included. Other use cases may arise.\n \"\"\"\n checks = ','.join(sorted(failed_checks))\n summary = [u'']\n if context in ['pre-install', 'health', 'adhoc']:\n # User was expecting to run checks, less explanation needed.\n summary.extend([\n u'You may configure or disable checks by setting Ansible '\n u'variables. To disable those above, set:',\n u' openshift_disable_check={checks}'.format(checks=checks),\n u'Consult check documentation for configurable variables.',\n ])\n else:\n # User may not be familiar with the checks, explain what checks are in\n # the first place.\n summary.extend([\n u'The execution of \"{playbook}\" includes checks designed to fail '\n u'early if the requirements of the playbook are not met. One or '\n u'more of these checks failed. To disregard these results,'\n u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),\n u' openshift_disable_check={checks}'.format(checks=checks),\n u'Failing check names are shown in the failure details above. '\n u'Some checks may be configurable by variables if your requirements '\n u'are different from the defaults; consult check documentation.',\n ])\n summary.append(\n u'Variables can be set in the inventory or passed on the command line '\n u'using the -e flag to ansible-playbook.'\n )\n return u'\\n'.join(summary)\n", "path": "roles/openshift_health_checker/callback_plugins/zz_failure_summary.py"}]}
| 3,883 | 458 |
gh_patches_debug_1925
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-673
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError when deleting a column
## Description
<!-- A clear and concise description of what the bug is. -->
An IndexError occurs when deleting a column through the API. Most of the time the error occurs when deleting the first or second column of a table. Deleting the last columns in a table does not seem to produce this error.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
- A column should be deleted
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
1. Delete the first or second column of a table via API. Example: api/v0/tables/1/columns/1/
2. Delete the first or second column of another table via API. Example: api/v0/tables/2/columns/0/
## Screenshots


## Environment
- OS: (_eg._ macOS 10.14.6; Fedora 32)
- Browser: (_eg._ Safari; Firefox)
- Browser Version: (_eg._ 13; 73)
- Other info:
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/columns/operations/select.py`
Content:
```
1 import warnings
2
3 from sqlalchemy import Table, MetaData, and_, select, text, func
4
5 from db.tables.operations.select import reflect_table_from_oid
6 from db.utils import execute_statement
7
8
9 def get_column_index_from_name(table_oid, column_name, engine, connection_to_use=None):
10 with warnings.catch_warnings():
11 warnings.filterwarnings("ignore", message="Did not recognize type")
12 pg_attribute = Table("pg_attribute", MetaData(), autoload_with=engine)
13 sel = select(pg_attribute.c.attnum).where(
14 and_(
15 pg_attribute.c.attrelid == table_oid,
16 pg_attribute.c.attname == column_name
17 )
18 )
19 result = execute_statement(engine, sel, connection_to_use).fetchone()[0]
20
21 # Account for dropped columns that don't appear in the SQLAlchemy tables
22 sel = (
23 select(func.count())
24 .where(and_(
25 pg_attribute.c.attisdropped.is_(True),
26 pg_attribute.c.attnum < result,
27 ))
28 )
29 dropped_count = execute_statement(engine, sel, connection_to_use).fetchone()[0]
30
31 return result - 1 - dropped_count
32
33
34 def get_column_default(table_oid, column_index, engine, connection_to_use=None):
35 table = reflect_table_from_oid(table_oid, engine, connection_to_use)
36 column = table.columns[column_index]
37 if column.server_default is None:
38 return None
39
40 metadata = MetaData()
41 with warnings.catch_warnings():
42 warnings.filterwarnings("ignore", message="Did not recognize type")
43 pg_attribute = Table("pg_attribute", metadata, autoload_with=engine)
44 pg_attrdef = Table("pg_attrdef", metadata, autoload_with=engine)
45
46 query = (
47 select(pg_attrdef.c.adbin)
48 .select_from(
49 pg_attrdef
50 .join(
51 pg_attribute,
52 and_(
53 pg_attribute.c.attnum == pg_attrdef.c.adnum,
54 pg_attribute.c.attrelid == pg_attrdef.c.adrelid
55 )
56 )
57 )
58 .where(and_(
59 pg_attribute.c.attrelid == table_oid,
60 pg_attribute.c.attname == column.name,
61 pg_attribute.c.attnum >= 1,
62 ))
63 )
64
65 result = execute_statement(engine, query, connection_to_use).first()[0]
66
67 # Here, we get the 'adbin' value for the current column, stored in the attrdef
68 # system table. The prefix of this value tells us whether the default is static
69 # ('{CONSTANT') or generated ('{FUNCEXPR'). We do not return generated defaults.
70 if result.startswith("{FUNCEXPR"):
71 return None
72
73 default_textual_sql = column.server_default.arg.text
74 # Defaults are stored as text with SQL casts appended
75 # Ex: "'test default string'::character varying" or "'2020-01-01'::date"
76 # Here, we execute the cast to get the proper python value
77 return execute_statement(engine, select(text(default_textual_sql)), connection_to_use).first()[0]
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py
--- a/db/columns/operations/select.py
+++ b/db/columns/operations/select.py
@@ -22,6 +22,7 @@
sel = (
select(func.count())
.where(and_(
+ pg_attribute.c.attrelid == table_oid,
pg_attribute.c.attisdropped.is_(True),
pg_attribute.c.attnum < result,
))
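The added `attrelid` predicate scopes the dropped-column count to the table being queried; without it, dropped columns from every table in the database are counted, which can leave the computed index wrong (even negative) and surface downstream as the IndexError from the issue. Below is a hedged sketch of the corrected count query, mirroring the patched `get_column_index_from_name`; `pg_attribute`, `table_oid`, and `result` are assumed to be defined as in that function.
```
from sqlalchemy import select, func, and_

dropped_count_sel = (
    select(func.count())
    .where(and_(
        pg_attribute.c.attrelid == table_oid,   # restrict to this table
        pg_attribute.c.attisdropped.is_(True),  # only dropped columns
        pg_attribute.c.attnum < result,         # only those before our column
    ))
)
```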
|
{"golden_diff": "diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py\n--- a/db/columns/operations/select.py\n+++ b/db/columns/operations/select.py\n@@ -22,6 +22,7 @@\n sel = (\n select(func.count())\n .where(and_(\n+ pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attisdropped.is_(True),\n pg_attribute.c.attnum < result,\n ))\n", "issue": "IndexError when deleting a column\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAn indexError occurs when deleting a column through the API. Most of the time the error occurs when deleting the first or second column of a table. Deleting the last columns in a table does not seem to produce this error. \r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n- A column should be deleted\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n1. Delete the first or second column of a table via API. Example: api/v0/tables/1/columns/1/\r\n2. Delete the first or second column of another table via API. Example: api/v0/tables/2/columns/0/\r\n\r\n## Screenshots\r\n\r\n\r\n\r\n\r\n## Environment\r\n - OS: (_eg._ macOS 10.14.6; Fedora 32)\r\n - Browser: (_eg._ Safari; Firefox)\r\n - Browser Version: (_eg._ 13; 73)\r\n - Other info:\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. -->\r\n\n", "before_files": [{"content": "import warnings\n\nfrom sqlalchemy import Table, MetaData, and_, select, text, func\n\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement\n\n\ndef get_column_index_from_name(table_oid, column_name, engine, connection_to_use=None):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", MetaData(), autoload_with=engine)\n sel = select(pg_attribute.c.attnum).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column_name\n )\n )\n result = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n # Account for dropped columns that don't appear in the SQLAlchemy tables\n sel = (\n select(func.count())\n .where(and_(\n pg_attribute.c.attisdropped.is_(True),\n pg_attribute.c.attnum < result,\n ))\n )\n dropped_count = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n return result - 1 - dropped_count\n\n\ndef get_column_default(table_oid, column_index, engine, connection_to_use=None):\n table = reflect_table_from_oid(table_oid, engine, connection_to_use)\n column = table.columns[column_index]\n if column.server_default is None:\n return None\n\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", metadata, autoload_with=engine)\n pg_attrdef = Table(\"pg_attrdef\", metadata, autoload_with=engine)\n\n query = (\n select(pg_attrdef.c.adbin)\n .select_from(\n pg_attrdef\n .join(\n pg_attribute,\n and_(\n pg_attribute.c.attnum == pg_attrdef.c.adnum,\n pg_attribute.c.attrelid == pg_attrdef.c.adrelid\n )\n )\n )\n .where(and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column.name,\n pg_attribute.c.attnum >= 1,\n ))\n )\n\n result = execute_statement(engine, query, connection_to_use).first()[0]\n\n # Here, we get 
the 'adbin' value for the current column, stored in the attrdef\n # system table. The prefix of this value tells us whether the default is static\n # ('{CONSTANT') or generated ('{FUNCEXPR'). We do not return generated defaults.\n if result.startswith(\"{FUNCEXPR\"):\n return None\n\n default_textual_sql = column.server_default.arg.text\n # Defaults are stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n return execute_statement(engine, select(text(default_textual_sql)), connection_to_use).first()[0]\n", "path": "db/columns/operations/select.py"}], "after_files": [{"content": "import warnings\n\nfrom sqlalchemy import Table, MetaData, and_, select, text, func\n\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement\n\n\ndef get_column_index_from_name(table_oid, column_name, engine, connection_to_use=None):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", MetaData(), autoload_with=engine)\n sel = select(pg_attribute.c.attnum).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column_name\n )\n )\n result = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n # Account for dropped columns that don't appear in the SQLAlchemy tables\n sel = (\n select(func.count())\n .where(and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attisdropped.is_(True),\n pg_attribute.c.attnum < result,\n ))\n )\n dropped_count = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n return result - 1 - dropped_count\n\n\ndef get_column_default(table_oid, column_index, engine, connection_to_use=None):\n table = reflect_table_from_oid(table_oid, engine, connection_to_use)\n column = table.columns[column_index]\n if column.server_default is None:\n return None\n\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", metadata, autoload_with=engine)\n pg_attrdef = Table(\"pg_attrdef\", metadata, autoload_with=engine)\n\n query = (\n select(pg_attrdef.c.adbin)\n .select_from(\n pg_attrdef\n .join(\n pg_attribute,\n and_(\n pg_attribute.c.attnum == pg_attrdef.c.adnum,\n pg_attribute.c.attrelid == pg_attrdef.c.adrelid\n )\n )\n )\n .where(and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column.name,\n pg_attribute.c.attnum >= 1,\n ))\n )\n\n result = execute_statement(engine, query, connection_to_use).first()[0]\n\n # Here, we get the 'adbin' value for the current column, stored in the attrdef\n # system table. The prefix of this value tells us whether the default is static\n # ('{CONSTANT') or generated ('{FUNCEXPR'). We do not return generated defaults.\n if result.startswith(\"{FUNCEXPR\"):\n return None\n\n default_textual_sql = column.server_default.arg.text\n # Defaults are stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n return execute_statement(engine, select(text(default_textual_sql)), connection_to_use).first()[0]\n", "path": "db/columns/operations/select.py"}]}
| 1,470 | 103 |
gh_patches_debug_59246
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-451
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Datacollector fix
Simplify the `DataCollector` API by allowing the user-provided model- and agent-level reporters to be the names of attributes, in addition to methods. e.g. instead of needing to write
```
agent_reporters={"Wealth": lambda a: a.wealth}
```
you can write
```
agent_reporters={"Wealth":"wealth"}
```
This PR implements this feature, and updates the tests and documentation accordingly.
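For illustration, a short sketch of how the two styles read side by side (assuming the usual `mesa.datacollection` import path; `compute_gini` is a placeholder callable, not something defined in this PR):
```
from mesa.datacollection import DataCollector

datacollector = DataCollector(
    model_reporters={"Gini": compute_gini},   # callables still work
    agent_reporters={"Wealth": "wealth"},     # new: plain attribute name
)
```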
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Mesa Agent-Based Modeling Framework
4
5 Core Objects: Model, and Agent.
6
7 """
8 import datetime
9
10 from .model import Model
11 from .agent import Agent
12
13
14 __all__ = ["Model", "Agent"]
15
16 __title__ = 'mesa'
17 __version__ = '0.8.2'
18 __license__ = 'Apache 2.0'
19 __copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesa/__init__.py b/mesa/__init__.py
--- a/mesa/__init__.py
+++ b/mesa/__init__.py
@@ -14,6 +14,6 @@
__all__ = ["Model", "Agent"]
__title__ = 'mesa'
-__version__ = '0.8.2'
+__version__ = '0.8.3'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year
|
{"golden_diff": "diff --git a/mesa/__init__.py b/mesa/__init__.py\n--- a/mesa/__init__.py\n+++ b/mesa/__init__.py\n@@ -14,6 +14,6 @@\n __all__ = [\"Model\", \"Agent\"]\n \n __title__ = 'mesa'\n-__version__ = '0.8.2'\n+__version__ = '0.8.3'\n __license__ = 'Apache 2.0'\n __copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year\n", "issue": "Datacollector fix\nSimplify the `DataCollector` API by allowing the user-provided model- and agent-level reporters to be the names of attributes, in addition to methods. e.g. instead of needing to write\r\n\r\n```\r\nagent_reporters={\"Wealth\": lambda a: a.wealth}\r\n```\r\n\r\nyou can write\r\n```\r\nagent_reporters={\"Wealth\":\"wealth\"}\r\n```\r\n\r\nThis PR implements this feature, and updates the tests and documentation accordingly.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = 'mesa'\n__version__ = '0.8.2'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year\n", "path": "mesa/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = 'mesa'\n__version__ = '0.8.3'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year\n", "path": "mesa/__init__.py"}]}
| 490 | 121 |
gh_patches_debug_31990
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-1811
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add AWS::Elasticsearch::Domain DomainEndpointOptions
CloudFormation now allows you to specify additional options for the domain endpoint of an Elasticsearch Domain, such as whether to require HTTPS for all traffic.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-domainendpointoptions.html
Adding support for this would be super.
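Once supported, usage could look roughly like the sketch below; the property and class names follow the CloudFormation documentation linked above and the patch later in this record, so treat the exact troposphere names as assumptions until the feature ships.
```
from troposphere import Template
from troposphere.elasticsearch import Domain, DomainEndpointOptions

template = Template()
template.add_resource(Domain(
    "SearchDomain",
    DomainName="my-search-domain",
    DomainEndpointOptions=DomainEndpointOptions(
        EnforceHTTPS=True,
        TLSSecurityPolicy="Policy-Min-TLS-1-2-2019-07",
    ),
))
```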
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/elasticsearch.py`
Content:
```
1 # Copyright (c) 2012-2015, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSProperty, AWSObject, Tags
7 from .compat import policytypes
8 from .validators import boolean, integer, integer_range, positive_integer
9
10 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
11
12
13 def validate_volume_type(volume_type):
14 """Validate VolumeType for ElasticsearchDomain"""
15 if volume_type not in VALID_VOLUME_TYPES:
16 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" %
17 ", ".join(VALID_VOLUME_TYPES))
18 return volume_type
19
20
21 class CognitoOptions(AWSProperty):
22 props = {
23 'Enabled': (boolean, False),
24 'IdentityPoolId': (basestring, False),
25 'RoleArn': (basestring, False),
26 'UserPoolId': (basestring, False),
27 }
28
29
30 class EBSOptions(AWSProperty):
31 props = {
32 'EBSEnabled': (boolean, False),
33 'Iops': (positive_integer, False),
34 'VolumeSize': (integer, False),
35 'VolumeType': (validate_volume_type, False)
36 }
37
38 def validate(self):
39 volume_type = self.properties.get('VolumeType')
40 iops = self.properties.get('Iops')
41 if volume_type == 'io1' and not iops:
42 raise ValueError("Must specify Iops if VolumeType is 'io1'.")
43
44
45 class ZoneAwarenessConfig(AWSProperty):
46 props = {
47 'AvailabilityZoneCount': (integer, False),
48 }
49
50
51 class ElasticsearchClusterConfig(AWSProperty):
52 props = {
53 'DedicatedMasterCount': (integer, False),
54 'DedicatedMasterEnabled': (boolean, False),
55 'DedicatedMasterType': (basestring, False),
56 'InstanceCount': (integer, False),
57 'InstanceType': (basestring, False),
58 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),
59 'ZoneAwarenessEnabled': (boolean, False)
60 }
61
62
63 class EncryptionAtRestOptions(AWSProperty):
64 props = {
65 'Enabled': (boolean, False),
66 'KmsKeyId': (basestring, False),
67 }
68
69
70 class NodeToNodeEncryptionOptions(AWSProperty):
71 props = {
72 'Enabled': (boolean, False),
73 }
74
75
76 class SnapshotOptions(AWSProperty):
77 props = {
78 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)
79 }
80
81
82 class VPCOptions(AWSProperty):
83 props = {
84 'SecurityGroupIds': ([basestring], False),
85 'SubnetIds': ([basestring], False)
86 }
87
88
89 class MasterUserOptions(AWSProperty):
90 props = {
91 'MasterUserARN': (basestring, False),
92 'MasterUserName': (basestring, False),
93 'MasterUserPassword': (basestring, False),
94 }
95
96
97 class AdvancedSecurityOptionsInput(AWSProperty):
98 props = {
99 'Enabled': (boolean, False),
100 'InternalUserDatabaseEnabled': (boolean, False),
101 'MasterUserOptions': (MasterUserOptions, False),
102 }
103
104
105 class Domain(AWSObject):
106 resource_type = "AWS::Elasticsearch::Domain"
107
108 props = {
109 'AccessPolicies': (policytypes, False),
110 'AdvancedOptions': (dict, False),
111 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
112 'CognitoOptions': (CognitoOptions, False),
113 'DomainName': (basestring, False),
114 'EBSOptions': (EBSOptions, False),
115 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
116 'ElasticsearchVersion': (basestring, False),
117 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),
118 'LogPublishingOptions': (dict, False),
119 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),
120 'SnapshotOptions': (SnapshotOptions, False),
121 'Tags': ((Tags, list), False),
122 'VPCOptions': (VPCOptions, False),
123 }
124
125
126 # Backward compatibility
127 ElasticsearchDomain = Domain
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -8,6 +8,10 @@
from .validators import boolean, integer, integer_range, positive_integer
VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
+VALID_TLS_SECURITY_POLICIES = (
+ 'Policy-Min-TLS-1-0-2019-07',
+ 'Policy-Min-TLS-1-2-2019-07'
+ )
def validate_volume_type(volume_type):
@@ -18,6 +22,14 @@
return volume_type
+def validate_tls_security_policy(tls_security_policy):
+ """Validate TLS Security Policy for ElasticsearchDomain"""
+ if tls_security_policy not in VALID_TLS_SECURITY_POLICIES:
+ raise ValueError("Minimum TLS Security Policy must be one of: %s" %
+ ", ".join(VALID_TLS_SECURITY_POLICIES))
+ return tls_security_policy
+
+
class CognitoOptions(AWSProperty):
props = {
'Enabled': (boolean, False),
@@ -27,6 +39,13 @@
}
+class DomainEndpointOptions(AWSProperty):
+ props = {
+ 'EnforceHTTPS': (boolean, False),
+ 'TLSSecurityPolicy': (validate_tls_security_policy, False),
+ }
+
+
class EBSOptions(AWSProperty):
props = {
'EBSEnabled': (boolean, False),
@@ -111,6 +130,7 @@
'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
'CognitoOptions': (CognitoOptions, False),
'DomainName': (basestring, False),
+ 'DomainEndpointOptions': (DomainEndpointOptions, False),
'EBSOptions': (EBSOptions, False),
'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
'ElasticsearchVersion': (basestring, False),
|
{"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -8,6 +8,10 @@\n from .validators import boolean, integer, integer_range, positive_integer\n \n VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n+VALID_TLS_SECURITY_POLICIES = (\n+ 'Policy-Min-TLS-1-0-2019-07',\n+ 'Policy-Min-TLS-1-2-2019-07'\n+ )\n \n \n def validate_volume_type(volume_type):\n@@ -18,6 +22,14 @@\n return volume_type\n \n \n+def validate_tls_security_policy(tls_security_policy):\n+ \"\"\"Validate TLS Security Policy for ElasticsearchDomain\"\"\"\n+ if tls_security_policy not in VALID_TLS_SECURITY_POLICIES:\n+ raise ValueError(\"Minimum TLS Security Policy must be one of: %s\" %\n+ \", \".join(VALID_TLS_SECURITY_POLICIES))\n+ return tls_security_policy\n+\n+\n class CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n@@ -27,6 +39,13 @@\n }\n \n \n+class DomainEndpointOptions(AWSProperty):\n+ props = {\n+ 'EnforceHTTPS': (boolean, False),\n+ 'TLSSecurityPolicy': (validate_tls_security_policy, False),\n+ }\n+\n+\n class EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n@@ -111,6 +130,7 @@\n 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n+ 'DomainEndpointOptions': (DomainEndpointOptions, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n", "issue": "Add AWS::Elasticsearch::Domain DomainEndpointOptions \nCloudformation now allows you to specify additional options for the domain endpoint, such as whether to require HTTPS for all traffic, with an Elasticseach Domain. \r\n\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-domainendpointoptions.html\r\n\r\nAdding support for this would be super. \n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSProperty, AWSObject, Tags\nfrom .compat import policytypes\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'IdentityPoolId': (basestring, False),\n 'RoleArn': (basestring, False),\n 'UserPoolId': (basestring, False),\n }\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ZoneAwarenessConfig(AWSProperty):\n props = {\n 'AvailabilityZoneCount': (integer, False),\n }\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass EncryptionAtRestOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'KmsKeyId': (basestring, False),\n }\n\n\nclass NodeToNodeEncryptionOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass VPCOptions(AWSProperty):\n props = {\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetIds': ([basestring], False)\n }\n\n\nclass MasterUserOptions(AWSProperty):\n props = {\n 'MasterUserARN': (basestring, False),\n 'MasterUserName': (basestring, False),\n 'MasterUserPassword': (basestring, False),\n }\n\n\nclass AdvancedSecurityOptionsInput(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'InternalUserDatabaseEnabled': (boolean, False),\n 'MasterUserOptions': (MasterUserOptions, False),\n }\n\n\nclass Domain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),\n 'LogPublishingOptions': (dict, False),\n 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': ((Tags, list), False),\n 'VPCOptions': (VPCOptions, False),\n }\n\n\n# Backward compatibility\nElasticsearchDomain = Domain\n", "path": "troposphere/elasticsearch.py"}], "after_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSProperty, AWSObject, Tags\nfrom .compat import policytypes\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\nVALID_TLS_SECURITY_POLICIES = (\n 'Policy-Min-TLS-1-0-2019-07',\n 'Policy-Min-TLS-1-2-2019-07'\n )\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\ndef validate_tls_security_policy(tls_security_policy):\n \"\"\"Validate TLS Security Policy for ElasticsearchDomain\"\"\"\n if tls_security_policy not in VALID_TLS_SECURITY_POLICIES:\n raise ValueError(\"Minimum TLS Security Policy must be one of: %s\" %\n \", \".join(VALID_TLS_SECURITY_POLICIES))\n return tls_security_policy\n\n\nclass CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'IdentityPoolId': (basestring, False),\n 'RoleArn': (basestring, False),\n 'UserPoolId': (basestring, False),\n }\n\n\nclass DomainEndpointOptions(AWSProperty):\n props = {\n 'EnforceHTTPS': (boolean, False),\n 'TLSSecurityPolicy': (validate_tls_security_policy, False),\n }\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ZoneAwarenessConfig(AWSProperty):\n props = {\n 'AvailabilityZoneCount': (integer, False),\n }\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass EncryptionAtRestOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'KmsKeyId': (basestring, False),\n }\n\n\nclass NodeToNodeEncryptionOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass VPCOptions(AWSProperty):\n props = {\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetIds': ([basestring], False)\n }\n\n\nclass MasterUserOptions(AWSProperty):\n props = {\n 'MasterUserARN': (basestring, False),\n 'MasterUserName': (basestring, False),\n 'MasterUserPassword': (basestring, False),\n }\n\n\nclass AdvancedSecurityOptionsInput(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'InternalUserDatabaseEnabled': (boolean, False),\n 'MasterUserOptions': (MasterUserOptions, False),\n }\n\n\nclass Domain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'DomainEndpointOptions': (DomainEndpointOptions, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': 
(basestring, False),\n 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),\n 'LogPublishingOptions': (dict, False),\n 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': ((Tags, list), False),\n 'VPCOptions': (VPCOptions, False),\n }\n\n\n# Backward compatibility\nElasticsearchDomain = Domain\n", "path": "troposphere/elasticsearch.py"}]}
| 1,536 | 451 |
gh_patches_debug_36776
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-14464
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WinVersion: add machine architecture property
Hi,
Stemming from #14397:
### Background:
Over the years, Windows supported various processor architectures from various vendors, including Intel x86 (32-bit), AMD64, Itanium (now discontinued), and ARM family (32-bit and 64-bit). While the base machine architecture for NVDA is x86, it provides support for x64 (Intel and AMD) and ARM64 through remote helper libraries.
While #14397 highlighted the need to improve support for ARM64 machines, it brought up an unspoken question: can NVDA effectively detect different machine architectures? Moreover, thinking about this issue led me to believe that it would be helpful to let NVDA report machine architecture as part of Windows version information, similar to what the Resource Monitor add-on provides.
### Is your feature request related to a problem? Please describe.
At the moment NVDA does recognize workstation versus server Windows releases, but not machine architecture, which could have helped debug #14397 and similar issues.
### Describe the solution you'd like
Add machine architecture (WinVersion.architecture) property to WinVersion class and report it at startup. This allows developers to figure out if the issue is specific to an architecture such as AMD64 or ARM64, as well as detect test machines easily once ARM machines are used more on the cloud (currently virtual machines on the cloud are powered by x64 processors).
In addition, the following changes can be made:
1. App modules: 64-bit process detection becomes easier thanks to the Windows machine architecture check.
2. Update check: made simpler by fetching WinVersion property.
### Describe alternatives you've considered
Leave code as is and rely on environment variables to detect machine architecture.
### Additional context
For an example, see josephsl/resourcemonitor, specifically Windows version script (NVDA+Shift+number row 6).
### Proposed solution:
Bring machine architecture detection code from Resource Monitor add-on to NVDA Core, likely to target 2023.1 or 2023.2. I'd be happy to mentor someone to bring this feature request to life.
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/winVersion.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2022 NV Access Limited, Bill Dengler, Joseph Lee
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 """A module used to record Windows versions.
7 It is also used to define feature checks such as
8 making sure NVDA can run on a minimum supported version of Windows.
9
10 When working on this file, consider moving to winAPI.
11 """
12
13 from typing import Optional
14 import sys
15 import os
16 import functools
17 import winreg
18
19
20 # Records a mapping between Windows builds and release names.
21 # These include build 10240 for Windows 10 1507 and releases with multiple release builds.
22 # These are applicable to Windows 10 and later as they report the same system version (10.0).
23 _BUILDS_TO_RELEASE_NAMES = {
24 10240: "Windows 10 1507",
25 10586: "Windows 10 1511",
26 14393: "Windows 10 1607",
27 15063: "Windows 10 1703",
28 16299: "Windows 10 1709",
29 17134: "Windows 10 1803",
30 17763: "Windows 10 1809",
31 18362: "Windows 10 1903",
32 18363: "Windows 10 1909",
33 19041: "Windows 10 2004",
34 19042: "Windows 10 20H2",
35 19043: "Windows 10 21H1",
36 19044: "Windows 10 21H2",
37 19045: "Windows 10 22H2",
38 20348: "Windows Server 2022",
39 22000: "Windows 11 21H2",
40 22621: "Windows 11 22H2",
41 }
42
43
44 @functools.lru_cache(maxsize=1)
45 def _getRunningVersionNameFromWinReg() -> str:
46 """Returns the Windows release name defined in Windows Registry.
47 This is applicable on Windows 10 Version 1511 (build 10586) and later.
48 """
49 # Cache the version in use on the system.
50 with winreg.OpenKey(
51 winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\Windows NT\CurrentVersion"
52 ) as currentVersion:
53 # Version 20H2 and later where a separate display version string is used.
54 try:
55 releaseId = winreg.QueryValueEx(currentVersion, "DisplayVersion")[0]
56 except OSError:
57 # Don't set anything if this is Windows 10 1507 or earlier.
58 try:
59 releaseId = winreg.QueryValueEx(currentVersion, "ReleaseID")[0]
60 except OSError:
61 raise RuntimeError(
62 "Release name is not recorded in Windows Registry on this version of Windows"
63 ) from None
64 return releaseId
65
66
67 @functools.total_ordering
68 class WinVersion(object):
69 """
70 Represents a Windows release.
71 Includes version major, minor, build, service pack information,
72 as well as tools such as checking for specific Windows 10 releases.
73 """
74
75 def __init__(
76 self,
77 major: int = 0,
78 minor: int = 0,
79 build: int = 0,
80 releaseName: Optional[str] = None,
81 servicePack: str = "",
82 productType: str = ""
83 ):
84 self.major = major
85 self.minor = minor
86 self.build = build
87 if releaseName:
88 self.releaseName = releaseName
89 else:
90 self.releaseName = self._getWindowsReleaseName()
91 self.servicePack = servicePack
92 self.productType = productType
93
94 def _getWindowsReleaseName(self) -> str:
95 """Returns the public release name for a given Windows release based on major, minor, and build.
96 This is useful if release names are not defined when constructing this class.
97 For example, 6.1 will return 'Windows 7'.
98 For Windows 10, feature update release name will be included.
99 On server systems, unless noted otherwise, client release names will be returned.
100 For example, 'Windows 10 1809' will be returned on Server 2019 systems.
101 """
102 if (self.major, self.minor) == (6, 1):
103 return "Windows 7"
104 elif (self.major, self.minor) == (6, 2):
105 return "Windows 8"
106 elif (self.major, self.minor) == (6, 3):
107 return "Windows 8.1"
108 elif self.major == 10:
109 # From Version 1511 (build 10586), release Id/display version comes from Windows Registry.
110 # However there are builds with no release name (Version 1507/10240)
111 # or releases with different builds.
112 # Look these up first before asking Windows Registry.
113 if self.build in _BUILDS_TO_RELEASE_NAMES:
114 return _BUILDS_TO_RELEASE_NAMES[self.build]
115 return "Windows 10 unknown"
116 else:
117 return "Windows release unknown"
118
119 def __repr__(self):
120 winVersionText = [self.releaseName]
121 winVersionText.append(f"({self.major}.{self.minor}.{self.build})")
122 if self.servicePack != "":
123 winVersionText.append(f"service pack {self.servicePack}")
124 if self.productType != "":
125 winVersionText.append(self.productType)
126 return " ".join(winVersionText)
127
128 def __eq__(self, other):
129 return (
130 (self.major, self.minor, self.build)
131 == (other.major, other.minor, other.build)
132 )
133
134 def __ge__(self, other):
135 return (
136 (self.major, self.minor, self.build)
137 >= (other.major, other.minor, other.build)
138 )
139
140
141 # Windows releases to WinVersion instances for easing comparisons.
142 WIN7 = WinVersion(major=6, minor=1, build=7600)
143 WIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack="1")
144 WIN8 = WinVersion(major=6, minor=2, build=9200)
145 WIN81 = WinVersion(major=6, minor=3, build=9600)
146 WIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)
147 WIN10_1511 = WinVersion(major=10, minor=0, build=10586)
148 WIN10_1607 = WinVersion(major=10, minor=0, build=14393)
149 WIN10_1703 = WinVersion(major=10, minor=0, build=15063)
150 WIN10_1709 = WinVersion(major=10, minor=0, build=16299)
151 WIN10_1803 = WinVersion(major=10, minor=0, build=17134)
152 WIN10_1809 = WinVersion(major=10, minor=0, build=17763)
153 WIN10_1903 = WinVersion(major=10, minor=0, build=18362)
154 WIN10_1909 = WinVersion(major=10, minor=0, build=18363)
155 WIN10_2004 = WinVersion(major=10, minor=0, build=19041)
156 WIN10_20H2 = WinVersion(major=10, minor=0, build=19042)
157 WIN10_21H1 = WinVersion(major=10, minor=0, build=19043)
158 WIN10_21H2 = WinVersion(major=10, minor=0, build=19044)
159 WIN10_22H2 = WinVersion(major=10, minor=0, build=19045)
160 WINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)
161 WIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)
162 WIN11_22H2 = WinVersion(major=10, minor=0, build=22621)
163
164
165 @functools.lru_cache(maxsize=1)
166 def getWinVer():
167 """Returns a record of current Windows version NVDA is running on.
168 """
169 winVer = sys.getwindowsversion()
170 # #12509: on Windows 10, fetch whatever Windows Registry says for the current build.
171 # #12626: note that not all Windows 10 releases are labeled "Windows 10"
172 # (build 22000 is Windows 11 despite major.minor being 10.0).
173 try:
174 if WinVersion(
175 major=winVer.major,
176 minor=winVer.minor,
177 build=winVer.build
178 ) >= WIN11:
179 releaseName = f"Windows 11 {_getRunningVersionNameFromWinReg()}"
180 else:
181 releaseName = f"Windows 10 {_getRunningVersionNameFromWinReg()}"
182 except RuntimeError:
183 releaseName = None
184 return WinVersion(
185 major=winVer.major,
186 minor=winVer.minor,
187 build=winVer.build,
188 releaseName=releaseName,
189 servicePack=winVer.service_pack,
190 productType=("workstation", "domain controller", "server")[winVer.product_type - 1]
191 )
192
193
194 def isSupportedOS():
195 # NVDA can only run on Windows 7 Service pack 1 and above
196 return getWinVer() >= WIN7_SP1
197
198
199 UWP_OCR_DATA_PATH = os.path.expandvars(r"$windir\OCR")
200
201
202 def isUwpOcrAvailable():
203 return os.path.isdir(UWP_OCR_DATA_PATH)
204
205
206 def isFullScreenMagnificationAvailable() -> bool:
207 """
208 Technically this is always False. The Magnification API has been marked by MS as unsupported for
209 WOW64 applications such as NVDA. For our usages, support has been added since Windows 8, relying on our
210 testing our specific usage of the API with each Windows version since Windows 8
211 """
212 return getWinVer() >= WIN8
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/winVersion.py b/source/winVersion.py
--- a/source/winVersion.py
+++ b/source/winVersion.py
@@ -15,6 +15,7 @@
import os
import functools
import winreg
+import platform
# Records a mapping between Windows builds and release names.
@@ -68,7 +69,7 @@
class WinVersion(object):
"""
Represents a Windows release.
- Includes version major, minor, build, service pack information,
+ Includes version major, minor, build, service pack information, machine architecture,
as well as tools such as checking for specific Windows 10 releases.
"""
@@ -79,7 +80,8 @@
build: int = 0,
releaseName: Optional[str] = None,
servicePack: str = "",
- productType: str = ""
+ productType: str = "",
+ processorArchitecture: str = ""
):
self.major = major
self.minor = minor
@@ -90,6 +92,7 @@
self.releaseName = self._getWindowsReleaseName()
self.servicePack = servicePack
self.productType = productType
+ self.processorArchitecture = processorArchitecture
def _getWindowsReleaseName(self) -> str:
"""Returns the public release name for a given Windows release based on major, minor, and build.
@@ -123,6 +126,8 @@
winVersionText.append(f"service pack {self.servicePack}")
if self.productType != "":
winVersionText.append(self.productType)
+ if self.processorArchitecture != "":
+ winVersionText.append(self.processorArchitecture)
return " ".join(winVersionText)
def __eq__(self, other):
@@ -187,7 +192,8 @@
build=winVer.build,
releaseName=releaseName,
servicePack=winVer.service_pack,
- productType=("workstation", "domain controller", "server")[winVer.product_type - 1]
+ productType=("workstation", "domain controller", "server")[winVer.product_type - 1],
+ processorArchitecture=platform.machine()
)
|
{"golden_diff": "diff --git a/source/winVersion.py b/source/winVersion.py\n--- a/source/winVersion.py\n+++ b/source/winVersion.py\n@@ -15,6 +15,7 @@\n import os\r\n import functools\r\n import winreg\r\n+import platform\r\n \r\n \r\n # Records a mapping between Windows builds and release names.\r\n@@ -68,7 +69,7 @@\n class WinVersion(object):\r\n \t\"\"\"\r\n \tRepresents a Windows release.\r\n-\tIncludes version major, minor, build, service pack information,\r\n+\tIncludes version major, minor, build, service pack information, machine architecture,\r\n \tas well as tools such as checking for specific Windows 10 releases.\r\n \t\"\"\"\r\n \r\n@@ -79,7 +80,8 @@\n \t\t\tbuild: int = 0,\r\n \t\t\treleaseName: Optional[str] = None,\r\n \t\t\tservicePack: str = \"\",\r\n-\t\t\tproductType: str = \"\"\r\n+\t\t\tproductType: str = \"\",\r\n+\t\t\tprocessorArchitecture: str = \"\"\r\n \t):\r\n \t\tself.major = major\r\n \t\tself.minor = minor\r\n@@ -90,6 +92,7 @@\n \t\t\tself.releaseName = self._getWindowsReleaseName()\r\n \t\tself.servicePack = servicePack\r\n \t\tself.productType = productType\r\n+\t\tself.processorArchitecture = processorArchitecture\r\n \r\n \tdef _getWindowsReleaseName(self) -> str:\r\n \t\t\"\"\"Returns the public release name for a given Windows release based on major, minor, and build.\r\n@@ -123,6 +126,8 @@\n \t\t\twinVersionText.append(f\"service pack {self.servicePack}\")\r\n \t\tif self.productType != \"\":\r\n \t\t\twinVersionText.append(self.productType)\r\n+\t\tif self.processorArchitecture != \"\":\r\n+\t\t\twinVersionText.append(self.processorArchitecture)\r\n \t\treturn \" \".join(winVersionText)\r\n \r\n \tdef __eq__(self, other):\r\n@@ -187,7 +192,8 @@\n \t\tbuild=winVer.build,\r\n \t\treleaseName=releaseName,\r\n \t\tservicePack=winVer.service_pack,\r\n-\t\tproductType=(\"workstation\", \"domain controller\", \"server\")[winVer.product_type - 1]\r\n+\t\tproductType=(\"workstation\", \"domain controller\", \"server\")[winVer.product_type - 1],\r\n+\t\tprocessorArchitecture=platform.machine()\r\n \t)\n", "issue": "WinVersion: add machine architecture property\nHi,\r\n\r\nStemming from #14397:\r\n\r\n### Background:\r\nOver the years, Windows supported various processor architectures from various vendors, including Intel x86 (32-bit), AMD64, Itanium (now discontinued), and ARM family (32-bit and 64-bit). While the base machine architecture for NVDA is x86, it provides support for x64 (Intel and AMD) and ARM64 through remote helper libraries.\r\n\r\nWhile #14397 highlighted the need to improve support for ARM64 machines, it brought up an unspoken questino: can NVDA effectively detect different machine architectures? Moreover, thinking about this issue led me to believe that it would be helpful to let NVDA report machine architecture as part of Windows version information, similar to what Resource Monitor add-on provides.\r\n\r\n### Is your feature request related to a problem? Please describe.\r\nAt the moment NVDA does recognize workstation versus server Windows releases, but not machine architecture, which could have helped debug #14397 and similar issues.\r\n\r\n### Describe the solution you'd like\r\nAdd machine architecture (WinVersion.architecture) property to WinVersion class and report it at startup. 
This allows developers to figure out if the issue is specific to an architecture such as AMD64 or AM64, as well as detect test machines easily once ARM machines are used more on the cloud (currently virtual machines on the cloud are powered by x64 processors).\r\n\r\nIn addition, the following changes can be made:\r\n\r\n1. App modules: 64-bit process dtection becoes easier thanks to checking for Windows machine architecture check.\r\n2. Update check: made simpler by fetching WinVersion property.\r\n\r\n### Describe alternatives you've considered\r\nLeave code as is and rely on environment variables to detect machine architecture.\r\n\r\n### Additional context\r\nFor an example, see josephsl/resourcemonitor, specifically Windows version script (NVDA+Shift+number row 6).\r\n\r\n### Proposed solution:\r\nBring machine architecture detection code from Resource Monitor add-on to NVDA Core, likely to target 2023.1 or 2023.2. I'd be happy to mentor someone to bring this feature request to life.\r\n\r\nThanks.\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2022 NV Access Limited, Bill Dengler, Joseph Lee\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\n\"\"\"A module used to record Windows versions.\r\nIt is also used to define feature checks such as\r\nmaking sure NVDA can run on a minimum supported version of Windows.\r\n\r\nWhen working on this file, consider moving to winAPI.\r\n\"\"\"\r\n\r\nfrom typing import Optional\r\nimport sys\r\nimport os\r\nimport functools\r\nimport winreg\r\n\r\n\r\n# Records a mapping between Windows builds and release names.\r\n# These include build 10240 for Windows 10 1507 and releases with multiple release builds.\r\n# These are applicable to Windows 10 and later as they report the same system version (10.0).\r\n_BUILDS_TO_RELEASE_NAMES = {\r\n\t10240: \"Windows 10 1507\",\r\n\t10586: \"Windows 10 1511\",\r\n\t14393: \"Windows 10 1607\",\r\n\t15063: \"Windows 10 1703\",\r\n\t16299: \"Windows 10 1709\",\r\n\t17134: \"Windows 10 1803\",\r\n\t17763: \"Windows 10 1809\",\r\n\t18362: \"Windows 10 1903\",\r\n\t18363: \"Windows 10 1909\",\r\n\t19041: \"Windows 10 2004\",\r\n\t19042: \"Windows 10 20H2\",\r\n\t19043: \"Windows 10 21H1\",\r\n\t19044: \"Windows 10 21H2\",\r\n\t19045: \"Windows 10 22H2\",\r\n\t20348: \"Windows Server 2022\",\r\n\t22000: \"Windows 11 21H2\",\r\n\t22621: \"Windows 11 22H2\",\r\n}\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef _getRunningVersionNameFromWinReg() -> str:\r\n\t\"\"\"Returns the Windows release name defined in Windows Registry.\r\n\tThis is applicable on Windows 10 Version 1511 (build 10586) and later.\r\n\t\"\"\"\r\n\t# Cache the version in use on the system.\r\n\twith winreg.OpenKey(\r\n\t\twinreg.HKEY_LOCAL_MACHINE, r\"Software\\Microsoft\\Windows NT\\CurrentVersion\"\r\n\t) as currentVersion:\r\n\t\t# Version 20H2 and later where a separate display version string is used.\r\n\t\ttry:\r\n\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"DisplayVersion\")[0]\r\n\t\texcept OSError:\r\n\t\t\t# Don't set anything if this is Windows 10 1507 or earlier.\r\n\t\t\ttry:\r\n\t\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"ReleaseID\")[0]\r\n\t\t\texcept OSError:\r\n\t\t\t\traise RuntimeError(\r\n\t\t\t\t\t\"Release name is not recorded in Windows Registry on this version of Windows\"\r\n\t\t\t\t) from None\r\n\treturn releaseId\r\n\r\n\r\[email protected]_ordering\r\nclass 
WinVersion(object):\r\n\t\"\"\"\r\n\tRepresents a Windows release.\r\n\tIncludes version major, minor, build, service pack information,\r\n\tas well as tools such as checking for specific Windows 10 releases.\r\n\t\"\"\"\r\n\r\n\tdef __init__(\r\n\t\t\tself,\r\n\t\t\tmajor: int = 0,\r\n\t\t\tminor: int = 0,\r\n\t\t\tbuild: int = 0,\r\n\t\t\treleaseName: Optional[str] = None,\r\n\t\t\tservicePack: str = \"\",\r\n\t\t\tproductType: str = \"\"\r\n\t):\r\n\t\tself.major = major\r\n\t\tself.minor = minor\r\n\t\tself.build = build\r\n\t\tif releaseName:\r\n\t\t\tself.releaseName = releaseName\r\n\t\telse:\r\n\t\t\tself.releaseName = self._getWindowsReleaseName()\r\n\t\tself.servicePack = servicePack\r\n\t\tself.productType = productType\r\n\r\n\tdef _getWindowsReleaseName(self) -> str:\r\n\t\t\"\"\"Returns the public release name for a given Windows release based on major, minor, and build.\r\n\t\tThis is useful if release names are not defined when constructing this class.\r\n\t\tFor example, 6.1 will return 'Windows 7'.\r\n\t\tFor Windows 10, feature update release name will be included.\r\n\t\tOn server systems, unless noted otherwise, client release names will be returned.\r\n\t\tFor example, 'Windows 10 1809' will be returned on Server 2019 systems.\r\n\t\t\"\"\"\r\n\t\tif (self.major, self.minor) == (6, 1):\r\n\t\t\treturn \"Windows 7\"\r\n\t\telif (self.major, self.minor) == (6, 2):\r\n\t\t\treturn \"Windows 8\"\r\n\t\telif (self.major, self.minor) == (6, 3):\r\n\t\t\treturn \"Windows 8.1\"\r\n\t\telif self.major == 10:\r\n\t\t\t# From Version 1511 (build 10586), release Id/display version comes from Windows Registry.\r\n\t\t\t# However there are builds with no release name (Version 1507/10240)\r\n\t\t\t# or releases with different builds.\r\n\t\t\t# Look these up first before asking Windows Registry.\r\n\t\t\tif self.build in _BUILDS_TO_RELEASE_NAMES:\r\n\t\t\t\treturn _BUILDS_TO_RELEASE_NAMES[self.build]\r\n\t\t\treturn \"Windows 10 unknown\"\r\n\t\telse:\r\n\t\t\treturn \"Windows release unknown\"\r\n\r\n\tdef __repr__(self):\r\n\t\twinVersionText = [self.releaseName]\r\n\t\twinVersionText.append(f\"({self.major}.{self.minor}.{self.build})\")\r\n\t\tif self.servicePack != \"\":\r\n\t\t\twinVersionText.append(f\"service pack {self.servicePack}\")\r\n\t\tif self.productType != \"\":\r\n\t\t\twinVersionText.append(self.productType)\r\n\t\treturn \" \".join(winVersionText)\r\n\r\n\tdef __eq__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t== (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\tdef __ge__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t>= (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\r\n# Windows releases to WinVersion instances for easing comparisons.\r\nWIN7 = WinVersion(major=6, minor=1, build=7600)\r\nWIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack=\"1\")\r\nWIN8 = WinVersion(major=6, minor=2, build=9200)\r\nWIN81 = WinVersion(major=6, minor=3, build=9600)\r\nWIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)\r\nWIN10_1511 = WinVersion(major=10, minor=0, build=10586)\r\nWIN10_1607 = WinVersion(major=10, minor=0, build=14393)\r\nWIN10_1703 = WinVersion(major=10, minor=0, build=15063)\r\nWIN10_1709 = WinVersion(major=10, minor=0, build=16299)\r\nWIN10_1803 = WinVersion(major=10, minor=0, build=17134)\r\nWIN10_1809 = WinVersion(major=10, minor=0, build=17763)\r\nWIN10_1903 = WinVersion(major=10, minor=0, build=18362)\r\nWIN10_1909 = WinVersion(major=10, 
minor=0, build=18363)\r\nWIN10_2004 = WinVersion(major=10, minor=0, build=19041)\r\nWIN10_20H2 = WinVersion(major=10, minor=0, build=19042)\r\nWIN10_21H1 = WinVersion(major=10, minor=0, build=19043)\r\nWIN10_21H2 = WinVersion(major=10, minor=0, build=19044)\r\nWIN10_22H2 = WinVersion(major=10, minor=0, build=19045)\r\nWINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)\r\nWIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)\r\nWIN11_22H2 = WinVersion(major=10, minor=0, build=22621)\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef getWinVer():\r\n\t\"\"\"Returns a record of current Windows version NVDA is running on.\r\n\t\"\"\"\r\n\twinVer = sys.getwindowsversion()\r\n\t# #12509: on Windows 10, fetch whatever Windows Registry says for the current build.\r\n\t# #12626: note that not all Windows 10 releases are labeled \"Windows 10\"\r\n\t# (build 22000 is Windows 11 despite major.minor being 10.0).\r\n\ttry:\r\n\t\tif WinVersion(\r\n\t\t\tmajor=winVer.major,\r\n\t\t\tminor=winVer.minor,\r\n\t\t\tbuild=winVer.build\r\n\t\t) >= WIN11:\r\n\t\t\treleaseName = f\"Windows 11 {_getRunningVersionNameFromWinReg()}\"\r\n\t\telse:\r\n\t\t\treleaseName = f\"Windows 10 {_getRunningVersionNameFromWinReg()}\"\r\n\texcept RuntimeError:\r\n\t\treleaseName = None\r\n\treturn WinVersion(\r\n\t\tmajor=winVer.major,\r\n\t\tminor=winVer.minor,\r\n\t\tbuild=winVer.build,\r\n\t\treleaseName=releaseName,\r\n\t\tservicePack=winVer.service_pack,\r\n\t\tproductType=(\"workstation\", \"domain controller\", \"server\")[winVer.product_type - 1]\r\n\t)\r\n\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn getWinVer() >= WIN7_SP1\r\n\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\n\r\n\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\n\r\ndef isFullScreenMagnificationAvailable() -> bool:\r\n\t\"\"\"\r\n\tTechnically this is always False. The Magnification API has been marked by MS as unsupported for\r\n\tWOW64 applications such as NVDA. 
For our usages, support has been added since Windows 8, relying on our\r\n\ttesting our specific usage of the API with each Windows version since Windows 8\r\n\t\"\"\"\r\n\treturn getWinVer() >= WIN8\r\n", "path": "source/winVersion.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2022 NV Access Limited, Bill Dengler, Joseph Lee\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\n\"\"\"A module used to record Windows versions.\r\nIt is also used to define feature checks such as\r\nmaking sure NVDA can run on a minimum supported version of Windows.\r\n\r\nWhen working on this file, consider moving to winAPI.\r\n\"\"\"\r\n\r\nfrom typing import Optional\r\nimport sys\r\nimport os\r\nimport functools\r\nimport winreg\r\nimport platform\r\n\r\n\r\n# Records a mapping between Windows builds and release names.\r\n# These include build 10240 for Windows 10 1507 and releases with multiple release builds.\r\n# These are applicable to Windows 10 and later as they report the same system version (10.0).\r\n_BUILDS_TO_RELEASE_NAMES = {\r\n\t10240: \"Windows 10 1507\",\r\n\t10586: \"Windows 10 1511\",\r\n\t14393: \"Windows 10 1607\",\r\n\t15063: \"Windows 10 1703\",\r\n\t16299: \"Windows 10 1709\",\r\n\t17134: \"Windows 10 1803\",\r\n\t17763: \"Windows 10 1809\",\r\n\t18362: \"Windows 10 1903\",\r\n\t18363: \"Windows 10 1909\",\r\n\t19041: \"Windows 10 2004\",\r\n\t19042: \"Windows 10 20H2\",\r\n\t19043: \"Windows 10 21H1\",\r\n\t19044: \"Windows 10 21H2\",\r\n\t19045: \"Windows 10 22H2\",\r\n\t20348: \"Windows Server 2022\",\r\n\t22000: \"Windows 11 21H2\",\r\n\t22621: \"Windows 11 22H2\",\r\n}\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef _getRunningVersionNameFromWinReg() -> str:\r\n\t\"\"\"Returns the Windows release name defined in Windows Registry.\r\n\tThis is applicable on Windows 10 Version 1511 (build 10586) and later.\r\n\t\"\"\"\r\n\t# Cache the version in use on the system.\r\n\twith winreg.OpenKey(\r\n\t\twinreg.HKEY_LOCAL_MACHINE, r\"Software\\Microsoft\\Windows NT\\CurrentVersion\"\r\n\t) as currentVersion:\r\n\t\t# Version 20H2 and later where a separate display version string is used.\r\n\t\ttry:\r\n\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"DisplayVersion\")[0]\r\n\t\texcept OSError:\r\n\t\t\t# Don't set anything if this is Windows 10 1507 or earlier.\r\n\t\t\ttry:\r\n\t\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"ReleaseID\")[0]\r\n\t\t\texcept OSError:\r\n\t\t\t\traise RuntimeError(\r\n\t\t\t\t\t\"Release name is not recorded in Windows Registry on this version of Windows\"\r\n\t\t\t\t) from None\r\n\treturn releaseId\r\n\r\n\r\[email protected]_ordering\r\nclass WinVersion(object):\r\n\t\"\"\"\r\n\tRepresents a Windows release.\r\n\tIncludes version major, minor, build, service pack information, machine architecture,\r\n\tas well as tools such as checking for specific Windows 10 releases.\r\n\t\"\"\"\r\n\r\n\tdef __init__(\r\n\t\t\tself,\r\n\t\t\tmajor: int = 0,\r\n\t\t\tminor: int = 0,\r\n\t\t\tbuild: int = 0,\r\n\t\t\treleaseName: Optional[str] = None,\r\n\t\t\tservicePack: str = \"\",\r\n\t\t\tproductType: str = \"\",\r\n\t\t\tprocessorArchitecture: str = \"\"\r\n\t):\r\n\t\tself.major = major\r\n\t\tself.minor = minor\r\n\t\tself.build = build\r\n\t\tif releaseName:\r\n\t\t\tself.releaseName = releaseName\r\n\t\telse:\r\n\t\t\tself.releaseName = self._getWindowsReleaseName()\r\n\t\tself.servicePack = servicePack\r\n\t\tself.productType 
= productType\r\n\t\tself.processorArchitecture = processorArchitecture\r\n\r\n\tdef _getWindowsReleaseName(self) -> str:\r\n\t\t\"\"\"Returns the public release name for a given Windows release based on major, minor, and build.\r\n\t\tThis is useful if release names are not defined when constructing this class.\r\n\t\tFor example, 6.1 will return 'Windows 7'.\r\n\t\tFor Windows 10, feature update release name will be included.\r\n\t\tOn server systems, unless noted otherwise, client release names will be returned.\r\n\t\tFor example, 'Windows 10 1809' will be returned on Server 2019 systems.\r\n\t\t\"\"\"\r\n\t\tif (self.major, self.minor) == (6, 1):\r\n\t\t\treturn \"Windows 7\"\r\n\t\telif (self.major, self.minor) == (6, 2):\r\n\t\t\treturn \"Windows 8\"\r\n\t\telif (self.major, self.minor) == (6, 3):\r\n\t\t\treturn \"Windows 8.1\"\r\n\t\telif self.major == 10:\r\n\t\t\t# From Version 1511 (build 10586), release Id/display version comes from Windows Registry.\r\n\t\t\t# However there are builds with no release name (Version 1507/10240)\r\n\t\t\t# or releases with different builds.\r\n\t\t\t# Look these up first before asking Windows Registry.\r\n\t\t\tif self.build in _BUILDS_TO_RELEASE_NAMES:\r\n\t\t\t\treturn _BUILDS_TO_RELEASE_NAMES[self.build]\r\n\t\t\treturn \"Windows 10 unknown\"\r\n\t\telse:\r\n\t\t\treturn \"Windows release unknown\"\r\n\r\n\tdef __repr__(self):\r\n\t\twinVersionText = [self.releaseName]\r\n\t\twinVersionText.append(f\"({self.major}.{self.minor}.{self.build})\")\r\n\t\tif self.servicePack != \"\":\r\n\t\t\twinVersionText.append(f\"service pack {self.servicePack}\")\r\n\t\tif self.productType != \"\":\r\n\t\t\twinVersionText.append(self.productType)\r\n\t\tif self.processorArchitecture != \"\":\r\n\t\t\twinVersionText.append(self.processorArchitecture)\r\n\t\treturn \" \".join(winVersionText)\r\n\r\n\tdef __eq__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t== (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\tdef __ge__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t>= (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\r\n# Windows releases to WinVersion instances for easing comparisons.\r\nWIN7 = WinVersion(major=6, minor=1, build=7600)\r\nWIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack=\"1\")\r\nWIN8 = WinVersion(major=6, minor=2, build=9200)\r\nWIN81 = WinVersion(major=6, minor=3, build=9600)\r\nWIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)\r\nWIN10_1511 = WinVersion(major=10, minor=0, build=10586)\r\nWIN10_1607 = WinVersion(major=10, minor=0, build=14393)\r\nWIN10_1703 = WinVersion(major=10, minor=0, build=15063)\r\nWIN10_1709 = WinVersion(major=10, minor=0, build=16299)\r\nWIN10_1803 = WinVersion(major=10, minor=0, build=17134)\r\nWIN10_1809 = WinVersion(major=10, minor=0, build=17763)\r\nWIN10_1903 = WinVersion(major=10, minor=0, build=18362)\r\nWIN10_1909 = WinVersion(major=10, minor=0, build=18363)\r\nWIN10_2004 = WinVersion(major=10, minor=0, build=19041)\r\nWIN10_20H2 = WinVersion(major=10, minor=0, build=19042)\r\nWIN10_21H1 = WinVersion(major=10, minor=0, build=19043)\r\nWIN10_21H2 = WinVersion(major=10, minor=0, build=19044)\r\nWIN10_22H2 = WinVersion(major=10, minor=0, build=19045)\r\nWINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)\r\nWIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)\r\nWIN11_22H2 = WinVersion(major=10, minor=0, build=22621)\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef 
getWinVer():\r\n\t\"\"\"Returns a record of current Windows version NVDA is running on.\r\n\t\"\"\"\r\n\twinVer = sys.getwindowsversion()\r\n\t# #12509: on Windows 10, fetch whatever Windows Registry says for the current build.\r\n\t# #12626: note that not all Windows 10 releases are labeled \"Windows 10\"\r\n\t# (build 22000 is Windows 11 despite major.minor being 10.0).\r\n\ttry:\r\n\t\tif WinVersion(\r\n\t\t\tmajor=winVer.major,\r\n\t\t\tminor=winVer.minor,\r\n\t\t\tbuild=winVer.build\r\n\t\t) >= WIN11:\r\n\t\t\treleaseName = f\"Windows 11 {_getRunningVersionNameFromWinReg()}\"\r\n\t\telse:\r\n\t\t\treleaseName = f\"Windows 10 {_getRunningVersionNameFromWinReg()}\"\r\n\texcept RuntimeError:\r\n\t\treleaseName = None\r\n\treturn WinVersion(\r\n\t\tmajor=winVer.major,\r\n\t\tminor=winVer.minor,\r\n\t\tbuild=winVer.build,\r\n\t\treleaseName=releaseName,\r\n\t\tservicePack=winVer.service_pack,\r\n\t\tproductType=(\"workstation\", \"domain controller\", \"server\")[winVer.product_type - 1],\r\n\t\tprocessorArchitecture=platform.machine()\r\n\t)\r\n\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn getWinVer() >= WIN7_SP1\r\n\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\n\r\n\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\n\r\ndef isFullScreenMagnificationAvailable() -> bool:\r\n\t\"\"\"\r\n\tTechnically this is always False. The Magnification API has been marked by MS as unsupported for\r\n\tWOW64 applications such as NVDA. For our usages, support has been added since Windows 8, relying on our\r\n\ttesting our specific usage of the API with each Windows version since Windows 8\r\n\t\"\"\"\r\n\treturn getWinVer() >= WIN8\r\n", "path": "source/winVersion.py"}]}
| 3,782 | 487 |
gh_patches_debug_12129
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1670
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filters not applied when calculating count of items within group
## Reproduce
1. Go to the Library Management schema.
1. Load the Table Page for the Publications table.
1. Group by "Publication Year".
1. Observe the first group, for year 1900, to contain 10 records and to display a "Count" of 10. Good.
1. Add a filter condition requiring Title to contain the string "To".
1. Observe the first group, for year 1900, to contain 2 records.
1. Expect "Count" to display 2.
1. Observe "Count" displays 10.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/transforms/operations/apply.py`
Content:
```
1 from db.transforms.base import enforce_relation_type_expectations, Transform
2 from db.transforms import base
3
4
5 def apply_transformations(relation, transformations):
6 enforce_relation_type_expectations(relation)
7 for transform in transformations:
8 relation = _apply_transform(relation, transform)
9 return relation
10
11
12 def _apply_transform(relation, transform):
13 assert isinstance(transform, Transform)
14 relation = transform.apply_to_relation(relation)
15 enforce_relation_type_expectations(relation)
16 return relation
17
18
19 # NOTE deprecated; this will be replaced with apply_transformations
20 def apply_transformations_deprecated(
21 table,
22 limit=None,
23 offset=None,
24 order_by=None,
25 filter=None,
26 columns_to_select=None,
27 group_by=None,
28 duplicate_only=None,
29 search=[],
30 ):
31 # TODO rename the actual method parameter
32 relation = table
33
34 enforce_relation_type_expectations(relation)
35
36 transforms = []
37
38 if duplicate_only:
39 transforms.append(base.DuplicateOnly(duplicate_only))
40 if group_by:
41 transforms.append(base.Group(group_by))
42 if order_by:
43 transforms.append(base.Order(order_by))
44 if filter:
45 transforms.append(base.Filter(filter))
46 if search:
47 transforms.append(base.Search([search, limit]))
48 if columns_to_select:
49 transforms.append(base.SelectSubsetOfColumns(columns_to_select))
50 if offset:
51 transforms.append(base.Offset(offset))
52 if limit:
53 transforms.append(base.Limit(limit))
54
55 relation = apply_transformations(relation, transforms)
56 return relation
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py
--- a/db/transforms/operations/apply.py
+++ b/db/transforms/operations/apply.py
@@ -35,14 +35,14 @@
transforms = []
+ if filter:
+ transforms.append(base.Filter(filter))
if duplicate_only:
transforms.append(base.DuplicateOnly(duplicate_only))
if group_by:
transforms.append(base.Group(group_by))
if order_by:
transforms.append(base.Order(order_by))
- if filter:
- transforms.append(base.Filter(filter))
if search:
transforms.append(base.Search([search, limit]))
if columns_to_select:
|
{"golden_diff": "diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py\n--- a/db/transforms/operations/apply.py\n+++ b/db/transforms/operations/apply.py\n@@ -35,14 +35,14 @@\n \n transforms = []\n \n+ if filter:\n+ transforms.append(base.Filter(filter))\n if duplicate_only:\n transforms.append(base.DuplicateOnly(duplicate_only))\n if group_by:\n transforms.append(base.Group(group_by))\n if order_by:\n transforms.append(base.Order(order_by))\n- if filter:\n- transforms.append(base.Filter(filter))\n if search:\n transforms.append(base.Search([search, limit]))\n if columns_to_select:\n", "issue": "Filters not applied when calculating count of items within group\n## Reproduce\n\n1. Go to the Library Management schema.\n1. Load the Table Page for the Publications table.\n1. Group by \"Publication Year\".\n1. Observe the first group, for year 1900, to contain 10 records and to display a \"Count\" of 10. Good.\n1. Add a filter condition requiring Title to contain the string \"To\".\n1. Observe the first group, for year 1900, to contain 2 records.\n1. Expect \"Count\" to display 2.\n1. Observe \"Count\" displays 10.\n\n\n\n", "before_files": [{"content": "from db.transforms.base import enforce_relation_type_expectations, Transform\nfrom db.transforms import base\n\n\ndef apply_transformations(relation, transformations):\n enforce_relation_type_expectations(relation)\n for transform in transformations:\n relation = _apply_transform(relation, transform)\n return relation\n\n\ndef _apply_transform(relation, transform):\n assert isinstance(transform, Transform)\n relation = transform.apply_to_relation(relation)\n enforce_relation_type_expectations(relation)\n return relation\n\n\n# NOTE deprecated; this will be replaced with apply_transformations\ndef apply_transformations_deprecated(\n table,\n limit=None,\n offset=None,\n order_by=None,\n filter=None,\n columns_to_select=None,\n group_by=None,\n duplicate_only=None,\n search=[],\n):\n # TODO rename the actual method parameter\n relation = table\n\n enforce_relation_type_expectations(relation)\n\n transforms = []\n\n if duplicate_only:\n transforms.append(base.DuplicateOnly(duplicate_only))\n if group_by:\n transforms.append(base.Group(group_by))\n if order_by:\n transforms.append(base.Order(order_by))\n if filter:\n transforms.append(base.Filter(filter))\n if search:\n transforms.append(base.Search([search, limit]))\n if columns_to_select:\n transforms.append(base.SelectSubsetOfColumns(columns_to_select))\n if offset:\n transforms.append(base.Offset(offset))\n if limit:\n transforms.append(base.Limit(limit))\n\n relation = apply_transformations(relation, transforms)\n return relation\n", "path": "db/transforms/operations/apply.py"}], "after_files": [{"content": "from db.transforms.base import enforce_relation_type_expectations, Transform\nfrom db.transforms import base\n\n\ndef apply_transformations(relation, transformations):\n enforce_relation_type_expectations(relation)\n for transform in transformations:\n relation = _apply_transform(relation, transform)\n return relation\n\n\ndef _apply_transform(relation, transform):\n assert isinstance(transform, Transform)\n relation = transform.apply_to_relation(relation)\n enforce_relation_type_expectations(relation)\n return relation\n\n\n# NOTE deprecated; this will be replaced with apply_transformations\ndef apply_transformations_deprecated(\n table,\n limit=None,\n offset=None,\n order_by=None,\n filter=None,\n columns_to_select=None,\n group_by=None,\n 
duplicate_only=None,\n search=[],\n):\n # TODO rename the actual method parameter\n relation = table\n\n enforce_relation_type_expectations(relation)\n\n transforms = []\n\n if filter:\n transforms.append(base.Filter(filter))\n if duplicate_only:\n transforms.append(base.DuplicateOnly(duplicate_only))\n if group_by:\n transforms.append(base.Group(group_by))\n if order_by:\n transforms.append(base.Order(order_by))\n if search:\n transforms.append(base.Search([search, limit]))\n if columns_to_select:\n transforms.append(base.SelectSubsetOfColumns(columns_to_select))\n if offset:\n transforms.append(base.Offset(offset))\n if limit:\n transforms.append(base.Limit(limit))\n\n relation = apply_transformations(relation, transforms)\n return relation\n", "path": "db/transforms/operations/apply.py"}]}
| 884 | 152 |
gh_patches_debug_41721
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-372
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Task doesn't seem to converge: Summarization, Translation
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
On a machine with 1 GPU.
```
FLASH_TESTING=1 python flash_examples/finetuning/summarization.py
```
```
FLASH_TESTING=1 python flash_examples/finetuning/translation.py
```
Steps to reproduce the behavior:
1. Go to '...'
2. Run '....'
3. Scroll down to '....'
4. See error
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- PyTorch Version (e.g., 1.0):
- OS (e.g., Linux):
- How you installed PyTorch (`conda`, `pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/text/seq2seq/summarization/data.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPostprocess, Seq2SeqPreprocess
15
16
17 class SummarizationData(Seq2SeqData):
18
19 preprocess_cls = Seq2SeqPreprocess
20 postprocess_cls = Seq2SeqPostprocess
21
```
Path: `flash/text/seq2seq/summarization/metric.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Dict, List, Tuple
15
16 import numpy as np
17 from torch import tensor
18 from torchmetrics import Metric
19
20 from flash.core.utilities.imports import _TEXT_AVAILABLE
21 from flash.text.seq2seq.summarization.utils import add_newline_to_end_of_each_sentence
22
23 if _TEXT_AVAILABLE:
24 from rouge_score import rouge_scorer, scoring
25 from rouge_score.scoring import AggregateScore, BootstrapAggregator, Score
26 else:
27 AggregateScore, Score, BootstrapAggregator = None, None, object
28
29
30 class RougeMetric(Metric):
31 """
32 Metric used for automatic summarization. https://www.aclweb.org/anthology/W04-1013/
33
34 Example:
35
36 >>> target = "Is your name John".split()
37 >>> preds = "My name is John".split()
38 >>> rouge = RougeMetric() # doctest: +SKIP
39 >>> from pprint import pprint
40 >>> pprint(rouge(preds, target)) # doctest: +NORMALIZE_WHITESPACE +SKIP
41 {'rouge1_fmeasure': 0.25,
42 'rouge1_precision': 0.25,
43 'rouge1_recall': 0.25,
44 'rouge2_fmeasure': 0.0,
45 'rouge2_precision': 0.0,
46 'rouge2_recall': 0.0,
47 'rougeL_fmeasure': 0.25,
48 'rougeL_precision': 0.25,
49 'rougeL_recall': 0.25,
50 'rougeLsum_fmeasure': 0.25,
51 'rougeLsum_precision': 0.25,
52 'rougeLsum_recall': 0.25}
53 """
54
55 def __init__(
56 self,
57 rouge_newline_sep: bool = False,
58 use_stemmer: bool = False,
59 rouge_keys: Tuple[str] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),
60 ):
61 super().__init__()
62 if not _TEXT_AVAILABLE:
63 raise ModuleNotFoundError("Please, pip install 'lightning-flash[text]'")
64
65 self.rouge_newline_sep = rouge_newline_sep
66 self.rouge_keys = rouge_keys
67 self.use_stemmer = use_stemmer
68 self.aggregator = RougeBatchAggregator()
69 self.scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=self.use_stemmer)
70
71 for key in rouge_keys:
72 self.add_state(key, [])
73
74 def update(self, pred_lns: List[str], tgt_lns: List[str]):
75 for pred, tgt in zip(pred_lns, tgt_lns):
76 # rougeLsum expects "\n" separated sentences within a summary
77 if self.rouge_newline_sep:
78 pred = add_newline_to_end_of_each_sentence(pred)
79 tgt = add_newline_to_end_of_each_sentence(tgt)
80 results = self.scorer.score(pred, tgt)
81 for key, score in results.items():
82 score = tensor([score.precision, score.recall, score.fmeasure])
83 getattr(self, key).append(score)
84
85 def compute(self) -> Dict[str, float]:
86 scores = {key: getattr(self, key) for key in self.rouge_keys}
87 self.aggregator.add_scores(scores)
88 result = self.aggregator.aggregate()
89 return format_rouge_results(result)
90
91 def __hash__(self):
92 # override to hash list objects.
93 # this is a bug in the upstream pytorch release.
94 hash_vals = [self.__class__.__name__]
95
96 for key in self._defaults.keys():
97 value = getattr(self, key)
98 if isinstance(value, list):
99 value = tuple(value)
100 hash_vals.append(value)
101
102 return hash(tuple(hash_vals))
103
104
105 class RougeBatchAggregator(BootstrapAggregator):
106 """
107 Aggregates rouge scores and provides confidence intervals.
108 """
109
110 def aggregate(self):
111 """
112 Override function to wrap the final results in `Score` objects.
113 This is due to the scores being replaced with a list of torch tensors.
114 """
115 result = {}
116 for score_type, scores in self._scores.items():
117 # Stack scores into a 2-d matrix of (sample, measure).
118 score_matrix = np.vstack(tuple(scores))
119 # Percentiles are returned as (interval, measure).
120 percentiles = self._bootstrap_resample(score_matrix)
121 # Extract the three intervals (low, mid, high).
122 intervals = tuple((Score(*percentiles[j, :]) for j in range(3)))
123 result[score_type] = AggregateScore(low=intervals[0], mid=intervals[1], high=intervals[2])
124 return result
125
126 def add_scores(self, scores):
127 self._scores = scores
128
129
130 def format_rouge_results(result: Dict[str, AggregateScore], decimal_places: int = 4) -> Dict[str, float]:
131 flattened_result = {}
132 for rouge_key, rouge_aggregate_score in result.items():
133 for stat in ["precision", "recall", "fmeasure"]:
134 mid = rouge_aggregate_score.mid
135 score = round(getattr(mid, stat), decimal_places)
136 flattened_result[f"{rouge_key}_{stat}"] = score
137 return flattened_result
138
```
Path: `flash/text/seq2seq/summarization/model.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union
15
16 import pytorch_lightning as pl
17 import torch
18
19 from flash.text.seq2seq.core.model import Seq2SeqTask
20 from flash.text.seq2seq.summarization.metric import RougeMetric
21
22
23 class SummarizationTask(Seq2SeqTask):
24 """Task for Seq2Seq Summarization.
25
26 Args:
27 backbone: backbone model to use for the task.
28 loss_fn: Loss function for training.
29 optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.
30 metrics: Metrics to compute for training and evaluation.
31 learning_rate: Learning rate to use for training, defaults to `3e-4`
32 val_target_max_length: Maximum length of targets in validation. Defaults to `128`
33 num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`
34 use_stemmer: Whether Porter stemmer should be used to strip word suffixes to improve matching.
35 rouge_newline_sep: Add a new line at the beginning of each sentence in Rouge Metric calculation.
36 """
37
38 def __init__(
39 self,
40 backbone: str = "sshleifer/tiny-mbart",
41 loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
42 optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
43 metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,
44 learning_rate: float = 5e-5,
45 val_target_max_length: Optional[int] = None,
46 num_beams: Optional[int] = 4,
47 use_stemmer: bool = True,
48 rouge_newline_sep: bool = True
49 ):
50 self.save_hyperparameters()
51 super().__init__(
52 backbone=backbone,
53 loss_fn=loss_fn,
54 optimizer=optimizer,
55 metrics=metrics,
56 learning_rate=learning_rate,
57 val_target_max_length=val_target_max_length,
58 num_beams=num_beams
59 )
60 self.rouge = RougeMetric(
61 rouge_newline_sep=rouge_newline_sep,
62 use_stemmer=use_stemmer,
63 )
64
65 @property
66 def task(self) -> str:
67 return "summarization"
68
69 def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:
70 tgt_lns = self.tokenize_labels(batch["labels"])
71 result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)
72 self.log_dict(result, on_step=False, on_epoch=True)
73
74 def _ci_benchmark_fn(self, history: List[Dict[str, Any]]):
75 """
76 This function is used only for debugging usage with CI
77 """
78 assert history[-1]["val_f1"] > 0.45
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/text/seq2seq/summarization/data.py b/flash/text/seq2seq/summarization/data.py
--- a/flash/text/seq2seq/summarization/data.py
+++ b/flash/text/seq2seq/summarization/data.py
@@ -11,10 +11,37 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Callable, Dict, Optional, Union
+
from flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPostprocess, Seq2SeqPreprocess
+class SummarizationPreprocess(Seq2SeqPreprocess):
+
+ def __init__(
+ self,
+ train_transform: Optional[Dict[str, Callable]] = None,
+ val_transform: Optional[Dict[str, Callable]] = None,
+ test_transform: Optional[Dict[str, Callable]] = None,
+ predict_transform: Optional[Dict[str, Callable]] = None,
+ backbone: str = "sshleifer/distilbart-xsum-1-1",
+ max_source_length: int = 128,
+ max_target_length: int = 128,
+ padding: Union[str, bool] = 'max_length'
+ ):
+ super().__init__(
+ train_transform=train_transform,
+ val_transform=val_transform,
+ test_transform=test_transform,
+ predict_transform=predict_transform,
+ backbone=backbone,
+ max_source_length=max_source_length,
+ max_target_length=max_target_length,
+ padding=padding,
+ )
+
+
class SummarizationData(Seq2SeqData):
- preprocess_cls = Seq2SeqPreprocess
+ preprocess_cls = SummarizationPreprocess
postprocess_cls = Seq2SeqPostprocess
diff --git a/flash/text/seq2seq/summarization/metric.py b/flash/text/seq2seq/summarization/metric.py
--- a/flash/text/seq2seq/summarization/metric.py
+++ b/flash/text/seq2seq/summarization/metric.py
@@ -21,7 +21,7 @@
from flash.text.seq2seq.summarization.utils import add_newline_to_end_of_each_sentence
if _TEXT_AVAILABLE:
- from rouge_score import rouge_scorer, scoring
+ from rouge_score import rouge_scorer
from rouge_score.scoring import AggregateScore, BootstrapAggregator, Score
else:
AggregateScore, Score, BootstrapAggregator = None, None, object
diff --git a/flash/text/seq2seq/summarization/model.py b/flash/text/seq2seq/summarization/model.py
--- a/flash/text/seq2seq/summarization/model.py
+++ b/flash/text/seq2seq/summarization/model.py
@@ -37,11 +37,11 @@
def __init__(
self,
- backbone: str = "sshleifer/tiny-mbart",
+ backbone: str = "sshleifer/distilbart-xsum-1-1",
loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,
- learning_rate: float = 5e-5,
+ learning_rate: float = 1e-5,
val_target_max_length: Optional[int] = None,
num_beams: Optional[int] = 4,
use_stemmer: bool = True,
@@ -69,10 +69,10 @@
def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:
tgt_lns = self.tokenize_labels(batch["labels"])
result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)
- self.log_dict(result, on_step=False, on_epoch=True)
+ self.log_dict(result, on_step=False, on_epoch=True, prog_bar=True)
def _ci_benchmark_fn(self, history: List[Dict[str, Any]]):
"""
This function is used only for debugging usage with CI
"""
- assert history[-1]["val_f1"] > 0.45
+ assert history[-1]["rouge1_recall"] > 0.2
|
{"golden_diff": "diff --git a/flash/text/seq2seq/summarization/data.py b/flash/text/seq2seq/summarization/data.py\n--- a/flash/text/seq2seq/summarization/data.py\n+++ b/flash/text/seq2seq/summarization/data.py\n@@ -11,10 +11,37 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+from typing import Callable, Dict, Optional, Union\n+\n from flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPostprocess, Seq2SeqPreprocess\n \n \n+class SummarizationPreprocess(Seq2SeqPreprocess):\n+\n+ def __init__(\n+ self,\n+ train_transform: Optional[Dict[str, Callable]] = None,\n+ val_transform: Optional[Dict[str, Callable]] = None,\n+ test_transform: Optional[Dict[str, Callable]] = None,\n+ predict_transform: Optional[Dict[str, Callable]] = None,\n+ backbone: str = \"sshleifer/distilbart-xsum-1-1\",\n+ max_source_length: int = 128,\n+ max_target_length: int = 128,\n+ padding: Union[str, bool] = 'max_length'\n+ ):\n+ super().__init__(\n+ train_transform=train_transform,\n+ val_transform=val_transform,\n+ test_transform=test_transform,\n+ predict_transform=predict_transform,\n+ backbone=backbone,\n+ max_source_length=max_source_length,\n+ max_target_length=max_target_length,\n+ padding=padding,\n+ )\n+\n+\n class SummarizationData(Seq2SeqData):\n \n- preprocess_cls = Seq2SeqPreprocess\n+ preprocess_cls = SummarizationPreprocess\n postprocess_cls = Seq2SeqPostprocess\ndiff --git a/flash/text/seq2seq/summarization/metric.py b/flash/text/seq2seq/summarization/metric.py\n--- a/flash/text/seq2seq/summarization/metric.py\n+++ b/flash/text/seq2seq/summarization/metric.py\n@@ -21,7 +21,7 @@\n from flash.text.seq2seq.summarization.utils import add_newline_to_end_of_each_sentence\n \n if _TEXT_AVAILABLE:\n- from rouge_score import rouge_scorer, scoring\n+ from rouge_score import rouge_scorer\n from rouge_score.scoring import AggregateScore, BootstrapAggregator, Score\n else:\n AggregateScore, Score, BootstrapAggregator = None, None, object\ndiff --git a/flash/text/seq2seq/summarization/model.py b/flash/text/seq2seq/summarization/model.py\n--- a/flash/text/seq2seq/summarization/model.py\n+++ b/flash/text/seq2seq/summarization/model.py\n@@ -37,11 +37,11 @@\n \n def __init__(\n self,\n- backbone: str = \"sshleifer/tiny-mbart\",\n+ backbone: str = \"sshleifer/distilbart-xsum-1-1\",\n loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,\n metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,\n- learning_rate: float = 5e-5,\n+ learning_rate: float = 1e-5,\n val_target_max_length: Optional[int] = None,\n num_beams: Optional[int] = 4,\n use_stemmer: bool = True,\n@@ -69,10 +69,10 @@\n def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:\n tgt_lns = self.tokenize_labels(batch[\"labels\"])\n result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)\n- self.log_dict(result, on_step=False, on_epoch=True)\n+ self.log_dict(result, on_step=False, on_epoch=True, prog_bar=True)\n \n def _ci_benchmark_fn(self, history: List[Dict[str, Any]]):\n \"\"\"\n This function is used only for debugging usage with CI\n \"\"\"\n- assert history[-1][\"val_f1\"] > 0.45\n+ assert history[-1][\"rouge1_recall\"] > 0.2\n", "issue": "Task doesn't seem to converge: Summurization, Translation\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n### To Reproduce\r\n\r\nOn a machine with 1 GPU.\r\n\r\n```\r\nFLASH_TESTING=1 python flash_examples/finetuning/summarization.py\r\n```\r\n\r\n```\r\nFLASH_TESTING=1 python flash_examples/finetuning/translation.py\r\n```\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to '...'\r\n2. Run '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0):\r\n - OS (e.g., Linux):\r\n - How you installed PyTorch (`conda`, `pip`, source):\r\n - Build command you used (if compiling from source):\r\n - Python version:\r\n - CUDA/cuDNN version:\r\n - GPU models and configuration:\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPostprocess, Seq2SeqPreprocess\n\n\nclass SummarizationData(Seq2SeqData):\n\n preprocess_cls = Seq2SeqPreprocess\n postprocess_cls = Seq2SeqPostprocess\n", "path": "flash/text/seq2seq/summarization/data.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom torch import tensor\nfrom torchmetrics import Metric\n\nfrom flash.core.utilities.imports import _TEXT_AVAILABLE\nfrom flash.text.seq2seq.summarization.utils import add_newline_to_end_of_each_sentence\n\nif _TEXT_AVAILABLE:\n from rouge_score import rouge_scorer, scoring\n from rouge_score.scoring import AggregateScore, BootstrapAggregator, Score\nelse:\n AggregateScore, Score, BootstrapAggregator = None, None, object\n\n\nclass RougeMetric(Metric):\n \"\"\"\n Metric used for automatic summarization. 
https://www.aclweb.org/anthology/W04-1013/\n\n Example:\n\n >>> target = \"Is your name John\".split()\n >>> preds = \"My name is John\".split()\n >>> rouge = RougeMetric() # doctest: +SKIP\n >>> from pprint import pprint\n >>> pprint(rouge(preds, target)) # doctest: +NORMALIZE_WHITESPACE +SKIP\n {'rouge1_fmeasure': 0.25,\n 'rouge1_precision': 0.25,\n 'rouge1_recall': 0.25,\n 'rouge2_fmeasure': 0.0,\n 'rouge2_precision': 0.0,\n 'rouge2_recall': 0.0,\n 'rougeL_fmeasure': 0.25,\n 'rougeL_precision': 0.25,\n 'rougeL_recall': 0.25,\n 'rougeLsum_fmeasure': 0.25,\n 'rougeLsum_precision': 0.25,\n 'rougeLsum_recall': 0.25}\n \"\"\"\n\n def __init__(\n self,\n rouge_newline_sep: bool = False,\n use_stemmer: bool = False,\n rouge_keys: Tuple[str] = (\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"),\n ):\n super().__init__()\n if not _TEXT_AVAILABLE:\n raise ModuleNotFoundError(\"Please, pip install 'lightning-flash[text]'\")\n\n self.rouge_newline_sep = rouge_newline_sep\n self.rouge_keys = rouge_keys\n self.use_stemmer = use_stemmer\n self.aggregator = RougeBatchAggregator()\n self.scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=self.use_stemmer)\n\n for key in rouge_keys:\n self.add_state(key, [])\n\n def update(self, pred_lns: List[str], tgt_lns: List[str]):\n for pred, tgt in zip(pred_lns, tgt_lns):\n # rougeLsum expects \"\\n\" separated sentences within a summary\n if self.rouge_newline_sep:\n pred = add_newline_to_end_of_each_sentence(pred)\n tgt = add_newline_to_end_of_each_sentence(tgt)\n results = self.scorer.score(pred, tgt)\n for key, score in results.items():\n score = tensor([score.precision, score.recall, score.fmeasure])\n getattr(self, key).append(score)\n\n def compute(self) -> Dict[str, float]:\n scores = {key: getattr(self, key) for key in self.rouge_keys}\n self.aggregator.add_scores(scores)\n result = self.aggregator.aggregate()\n return format_rouge_results(result)\n\n def __hash__(self):\n # override to hash list objects.\n # this is a bug in the upstream pytorch release.\n hash_vals = [self.__class__.__name__]\n\n for key in self._defaults.keys():\n value = getattr(self, key)\n if isinstance(value, list):\n value = tuple(value)\n hash_vals.append(value)\n\n return hash(tuple(hash_vals))\n\n\nclass RougeBatchAggregator(BootstrapAggregator):\n \"\"\"\n Aggregates rouge scores and provides confidence intervals.\n \"\"\"\n\n def aggregate(self):\n \"\"\"\n Override function to wrap the final results in `Score` objects.\n This is due to the scores being replaced with a list of torch tensors.\n \"\"\"\n result = {}\n for score_type, scores in self._scores.items():\n # Stack scores into a 2-d matrix of (sample, measure).\n score_matrix = np.vstack(tuple(scores))\n # Percentiles are returned as (interval, measure).\n percentiles = self._bootstrap_resample(score_matrix)\n # Extract the three intervals (low, mid, high).\n intervals = tuple((Score(*percentiles[j, :]) for j in range(3)))\n result[score_type] = AggregateScore(low=intervals[0], mid=intervals[1], high=intervals[2])\n return result\n\n def add_scores(self, scores):\n self._scores = scores\n\n\ndef format_rouge_results(result: Dict[str, AggregateScore], decimal_places: int = 4) -> Dict[str, float]:\n flattened_result = {}\n for rouge_key, rouge_aggregate_score in result.items():\n for stat in [\"precision\", \"recall\", \"fmeasure\"]:\n mid = rouge_aggregate_score.mid\n score = round(getattr(mid, stat), decimal_places)\n flattened_result[f\"{rouge_key}_{stat}\"] = score\n return flattened_result\n", "path": 
"flash/text/seq2seq/summarization/metric.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union\n\nimport pytorch_lightning as pl\nimport torch\n\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\nfrom flash.text.seq2seq.summarization.metric import RougeMetric\n\n\nclass SummarizationTask(Seq2SeqTask):\n \"\"\"Task for Seq2Seq Summarization.\n\n Args:\n backbone: backbone model to use for the task.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.\n metrics: Metrics to compute for training and evaluation.\n learning_rate: Learning rate to use for training, defaults to `3e-4`\n val_target_max_length: Maximum length of targets in validation. Defaults to `128`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n use_stemmer: Whether Porter stemmer should be used to strip word suffixes to improve matching.\n rouge_newline_sep: Add a new line at the beginning of each sentence in Rouge Metric calculation.\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"sshleifer/tiny-mbart\",\n loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,\n metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,\n learning_rate: float = 5e-5,\n val_target_max_length: Optional[int] = None,\n num_beams: Optional[int] = 4,\n use_stemmer: bool = True,\n rouge_newline_sep: bool = True\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n val_target_max_length=val_target_max_length,\n num_beams=num_beams\n )\n self.rouge = RougeMetric(\n rouge_newline_sep=rouge_newline_sep,\n use_stemmer=use_stemmer,\n )\n\n @property\n def task(self) -> str:\n return \"summarization\"\n\n def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:\n tgt_lns = self.tokenize_labels(batch[\"labels\"])\n result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)\n self.log_dict(result, on_step=False, on_epoch=True)\n\n def _ci_benchmark_fn(self, history: List[Dict[str, Any]]):\n \"\"\"\n This function is used only for debugging usage with CI\n \"\"\"\n assert history[-1][\"val_f1\"] > 0.45\n", "path": "flash/text/seq2seq/summarization/model.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Callable, Dict, Optional, Union\n\nfrom flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPostprocess, Seq2SeqPreprocess\n\n\nclass SummarizationPreprocess(Seq2SeqPreprocess):\n\n def __init__(\n self,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n backbone: str = \"sshleifer/distilbart-xsum-1-1\",\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = 'max_length'\n ):\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n backbone=backbone,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n )\n\n\nclass SummarizationData(Seq2SeqData):\n\n preprocess_cls = SummarizationPreprocess\n postprocess_cls = Seq2SeqPostprocess\n", "path": "flash/text/seq2seq/summarization/data.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom torch import tensor\nfrom torchmetrics import Metric\n\nfrom flash.core.utilities.imports import _TEXT_AVAILABLE\nfrom flash.text.seq2seq.summarization.utils import add_newline_to_end_of_each_sentence\n\nif _TEXT_AVAILABLE:\n from rouge_score import rouge_scorer\n from rouge_score.scoring import AggregateScore, BootstrapAggregator, Score\nelse:\n AggregateScore, Score, BootstrapAggregator = None, None, object\n\n\nclass RougeMetric(Metric):\n \"\"\"\n Metric used for automatic summarization. 
https://www.aclweb.org/anthology/W04-1013/\n\n Example:\n\n >>> target = \"Is your name John\".split()\n >>> preds = \"My name is John\".split()\n >>> rouge = RougeMetric() # doctest: +SKIP\n >>> from pprint import pprint\n >>> pprint(rouge(preds, target)) # doctest: +NORMALIZE_WHITESPACE +SKIP\n {'rouge1_fmeasure': 0.25,\n 'rouge1_precision': 0.25,\n 'rouge1_recall': 0.25,\n 'rouge2_fmeasure': 0.0,\n 'rouge2_precision': 0.0,\n 'rouge2_recall': 0.0,\n 'rougeL_fmeasure': 0.25,\n 'rougeL_precision': 0.25,\n 'rougeL_recall': 0.25,\n 'rougeLsum_fmeasure': 0.25,\n 'rougeLsum_precision': 0.25,\n 'rougeLsum_recall': 0.25}\n \"\"\"\n\n def __init__(\n self,\n rouge_newline_sep: bool = False,\n use_stemmer: bool = False,\n rouge_keys: Tuple[str] = (\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"),\n ):\n super().__init__()\n if not _TEXT_AVAILABLE:\n raise ModuleNotFoundError(\"Please, pip install 'lightning-flash[text]'\")\n\n self.rouge_newline_sep = rouge_newline_sep\n self.rouge_keys = rouge_keys\n self.use_stemmer = use_stemmer\n self.aggregator = RougeBatchAggregator()\n self.scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=self.use_stemmer)\n\n for key in rouge_keys:\n self.add_state(key, [])\n\n def update(self, pred_lns: List[str], tgt_lns: List[str]):\n for pred, tgt in zip(pred_lns, tgt_lns):\n # rougeLsum expects \"\\n\" separated sentences within a summary\n if self.rouge_newline_sep:\n pred = add_newline_to_end_of_each_sentence(pred)\n tgt = add_newline_to_end_of_each_sentence(tgt)\n results = self.scorer.score(pred, tgt)\n for key, score in results.items():\n score = tensor([score.precision, score.recall, score.fmeasure])\n getattr(self, key).append(score)\n\n def compute(self) -> Dict[str, float]:\n scores = {key: getattr(self, key) for key in self.rouge_keys}\n self.aggregator.add_scores(scores)\n result = self.aggregator.aggregate()\n return format_rouge_results(result)\n\n def __hash__(self):\n # override to hash list objects.\n # this is a bug in the upstream pytorch release.\n hash_vals = [self.__class__.__name__]\n\n for key in self._defaults.keys():\n value = getattr(self, key)\n if isinstance(value, list):\n value = tuple(value)\n hash_vals.append(value)\n\n return hash(tuple(hash_vals))\n\n\nclass RougeBatchAggregator(BootstrapAggregator):\n \"\"\"\n Aggregates rouge scores and provides confidence intervals.\n \"\"\"\n\n def aggregate(self):\n \"\"\"\n Override function to wrap the final results in `Score` objects.\n This is due to the scores being replaced with a list of torch tensors.\n \"\"\"\n result = {}\n for score_type, scores in self._scores.items():\n # Stack scores into a 2-d matrix of (sample, measure).\n score_matrix = np.vstack(tuple(scores))\n # Percentiles are returned as (interval, measure).\n percentiles = self._bootstrap_resample(score_matrix)\n # Extract the three intervals (low, mid, high).\n intervals = tuple((Score(*percentiles[j, :]) for j in range(3)))\n result[score_type] = AggregateScore(low=intervals[0], mid=intervals[1], high=intervals[2])\n return result\n\n def add_scores(self, scores):\n self._scores = scores\n\n\ndef format_rouge_results(result: Dict[str, AggregateScore], decimal_places: int = 4) -> Dict[str, float]:\n flattened_result = {}\n for rouge_key, rouge_aggregate_score in result.items():\n for stat in [\"precision\", \"recall\", \"fmeasure\"]:\n mid = rouge_aggregate_score.mid\n score = round(getattr(mid, stat), decimal_places)\n flattened_result[f\"{rouge_key}_{stat}\"] = score\n return flattened_result\n", "path": 
"flash/text/seq2seq/summarization/metric.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union\n\nimport pytorch_lightning as pl\nimport torch\n\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\nfrom flash.text.seq2seq.summarization.metric import RougeMetric\n\n\nclass SummarizationTask(Seq2SeqTask):\n \"\"\"Task for Seq2Seq Summarization.\n\n Args:\n backbone: backbone model to use for the task.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.\n metrics: Metrics to compute for training and evaluation.\n learning_rate: Learning rate to use for training, defaults to `3e-4`\n val_target_max_length: Maximum length of targets in validation. Defaults to `128`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n use_stemmer: Whether Porter stemmer should be used to strip word suffixes to improve matching.\n rouge_newline_sep: Add a new line at the beginning of each sentence in Rouge Metric calculation.\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"sshleifer/distilbart-xsum-1-1\",\n loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,\n metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,\n learning_rate: float = 1e-5,\n val_target_max_length: Optional[int] = None,\n num_beams: Optional[int] = 4,\n use_stemmer: bool = True,\n rouge_newline_sep: bool = True\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n val_target_max_length=val_target_max_length,\n num_beams=num_beams\n )\n self.rouge = RougeMetric(\n rouge_newline_sep=rouge_newline_sep,\n use_stemmer=use_stemmer,\n )\n\n @property\n def task(self) -> str:\n return \"summarization\"\n\n def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:\n tgt_lns = self.tokenize_labels(batch[\"labels\"])\n result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)\n self.log_dict(result, on_step=False, on_epoch=True, prog_bar=True)\n\n def _ci_benchmark_fn(self, history: List[Dict[str, Any]]):\n \"\"\"\n This function is used only for debugging usage with CI\n \"\"\"\n assert history[-1][\"rouge1_recall\"] > 0.2\n", "path": "flash/text/seq2seq/summarization/model.py"}]}
| 3,301 | 985 |
gh_patches_debug_1479
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-870
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add absolute urls to the canonical links. stb2
We need to fix how canonical links are built on the site.
The address in a canonical link must be absolute:
<link rel="canonical" href="https://www.сайт.ру/адрес_страницы" >
and not like this:
<link rel="canonical" href="/адрес_страницы" > - this is wrong.
Search engines ignore this tag if a relative address is specified in it...
When I crawl the site I get many duplicate pages (pagination); the canonical tag is present in the code, but when scanning with a method similar to a search robot's, the pages are not marked as canonical.
The same probably needs to be done on STB.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/context_processors.py`
Content:
```
1 from django.conf import settings
2
3
4 def shop(request):
5 """
6 Inject shop dict into request.
7
8 Shop dict contains information about shop:
9 emails, phones, API-integrations.
10 """
11 return {
12 'shop': settings.SHOP,
13 'DEBUG': settings.DEBUG,
14 'BASE_URL': settings.BASE_URL,
15 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,
16 }
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/context_processors.py b/shopelectro/context_processors.py
--- a/shopelectro/context_processors.py
+++ b/shopelectro/context_processors.py
@@ -11,6 +11,6 @@
return {
'shop': settings.SHOP,
'DEBUG': settings.DEBUG,
- 'BASE_URL': settings.BASE_URL,
+ 'base_url': settings.BASE_URL,
'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,
}
|
{"golden_diff": "diff --git a/shopelectro/context_processors.py b/shopelectro/context_processors.py\n--- a/shopelectro/context_processors.py\n+++ b/shopelectro/context_processors.py\n@@ -11,6 +11,6 @@\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n- 'BASE_URL': settings.BASE_URL,\n+ 'base_url': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n", "issue": "Add absolute urls to the canonical links. stb2\n\u041d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e \u043f\u043e\u043f\u0440\u0430\u0432\u0438\u0442\u044c \u043f\u043e\u0441\u0442\u0440\u043e\u0435\u043d\u0438\u0435 \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u0438\u0445 \u0441\u0441\u044b\u043b\u043e\u043a \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\r\n\u0410\u0434\u0440\u0435\u0441 \u0432 \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u043e\u0439 \u0441\u0441\u044b\u043b\u043a\u0435 \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u043e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e \u0430\u0431\u0441\u043e\u043b\u044e\u0442\u043d\u044b\u0439\r\n<link rel=\"canonical\" href=\"https://www.\u0441\u0430\u0439\u0442.\u0440\u0443/\u0430\u0434\u0440\u0435\u0441_\u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b\" >\r\n\u0430 \u043d\u0435 \u0442\u0430\u043a\r\n<link rel=\"canonical\" href=\"/\u0430\u0434\u0440\u0435\u0441_\u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b\" > - \u044d\u0442\u043e \u043d\u0435\u0432\u0435\u0440\u043d\u043e\r\n\u041f\u043e\u0438\u0441\u043a\u043e\u0432\u0438\u043a\u0438 \u0438\u0433\u043d\u043e\u0440\u0438\u0440\u0443\u044e\u0442 \u044d\u0442\u043e\u0442 \u0442\u0435\u0433, \u0435\u0441\u043b\u0438 \u0443\u043a\u0430\u0437\u0430\u043d \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0439 \u0430\u0434\u0440\u0435\u0441 \u0432 \u0442\u0435\u0433\u0435...\r\n\u0423 \u043c\u0435\u043d\u044f \u043f\u0440\u0438 \u0441\u043a\u0430\u043d\u0435 \u043f\u043e\u044f\u0432\u043b\u044f\u0435\u0442\u0441\u044f \u043c\u043d\u043e\u0433\u043e \u0441\u0442\u0440\u0430\u043d\u0438\u0446 \u0434\u0443\u0431\u043b\u0435\u0439 (\u043f\u0430\u0433\u0438\u043d\u0430\u0446\u0438\u044f), \u0432 \u043a\u043e\u0434\u0435 \u0443\u043a\u0430\u0437\u0430\u043d \u043a\u0430\u043d\u043e\u043d\u0438\u043a\u0430\u043b. \u0430 \u043f\u0440\u0438 \u0441\u043a\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0438 \u043c\u0435\u0442\u043e\u0434\u043e\u043c \u0430\u043d\u0430\u043b\u043e\u0433\u0438\u0447\u043d\u044b\u043c \u043f\u043e\u0438\u0441\u043a\u043e\u0432\u043e\u043c\u0443 \u0440\u043e\u0431\u043e\u0442\u0443 \u0441\u0440\u0430\u043d\u0438\u0446\u044b \u043a\u0430\u043a \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u043d\u0435 \u043f\u043e\u043c\u0435\u0447\u0430\u044e\u0442\u0441\u044f\r\n\r\n\u0412\u0435\u0440\u043e\u044f\u0442\u043d\u043e, \u043d\u0430 STB \u043d\u0443\u0436\u043d\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c \u0442\u0430\u043a \u0436\u0435. 
\n", "before_files": [{"content": "from django.conf import settings\n\n\ndef shop(request):\n \"\"\"\n Inject shop dict into request.\n\n Shop dict contains information about shop:\n emails, phones, API-integrations.\n \"\"\"\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n 'BASE_URL': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n", "path": "shopelectro/context_processors.py"}], "after_files": [{"content": "from django.conf import settings\n\n\ndef shop(request):\n \"\"\"\n Inject shop dict into request.\n\n Shop dict contains information about shop:\n emails, phones, API-integrations.\n \"\"\"\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n 'base_url': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n", "path": "shopelectro/context_processors.py"}]}
| 565 | 107 |
gh_patches_debug_17504
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2774
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: Forward references must evaluate to types.
So I am running Strawberry with Fastapi
## Describe the Bug
I have a schema like this:
```
from __future__ import annotations
from enum import Enum
import strawberry
import datetime
from strawberry.scalars import JSON
@strawberry.enum
class Sex(Enum):
male = "Male"
female ="Female"
@strawberry.type
class PersonBase:
name: JSON | None
sex: Sex | None
@strawberry.type
class Client(PersonBase):
firm: Firm | None = None
```
When I try to run the server it throws back an error:
```
File "C:\Users\0\Desktop\Fastapi\Test\.\app\main.py", line 14, in <module>
from app.schema import schema
File "C:\Users\0\Desktop\Fastapi\Test\.\app\schema.py", line 624, in <module>
schema = strawberry.Schema(query=Query, mutation = Mutation)
File "C:\Users\0\Envs\Fastapi\lib\site-packages\strawberry\schema\schema.py", line 115, in __init__
self._schema = GraphQLSchema(
File "C:\Users\0\Envs\Fastapi\lib\site-packages\graphql\type\schema.py", line 224, in __init__
collect_referenced_types(query)
File "C:\Users\0\Envs\Fastapi\lib\site-packages\graphql\type\schema.py", line 433, in collect_referenced_types
collect_referenced_types(field.type)
File "C:\Users\0\Envs\Fastapi\lib\site-packages\graphql\type\schema.py", line 433, in collect_referenced_types
collect_referenced_types(field.type)
File "C:\Users\0\Envs\Fastapi\lib\site-packages\graphql\type\schema.py", line 432, in collect_referenced_types
for field in named_type.fields.values():
File "C:\Users\0\AppData\Local\Programs\Python\Python310\lib\functools.py", line 970, in __get__
val = self.func(instance)
File "C:\Users\0\Envs\Fastapi\lib\site-packages\graphql\type\definition.py", line 811, in fields
raise cls(f"{self.name} fields cannot be resolved. {error}") from error
TypeError: Client fields cannot be resolved. Forward references must evaluate to types. Got <strawberry.type.StrawberryOptional object at 0x000002C85C5F5990>.
```
Note that if I change one thing like this making a required name field with a string the error is resolved
```
@strawberry.type
class Client(PersonBase):
firm: Firm | None = None
name: str | None
```
But if I put PersonBase into the model like this, it still throws an error even though the name field is required as JSON.
```
class Client:
firm: Firm | None = None
name: JSON | None
sex: Sex | None
```
## System Information
- Operating system: Windows 10
- Strawberry version (if applicable): strawberry-graphql==0.139.0
## Additional Context
Not sure if this is an bug or I did something wrong but I find this behavior strange because the JSON and Enum are both scalars but required and they triggered a type error.
<!-- POLAR PLEDGE BADGE START -->
## Upvote & Fund
- We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue.
- We receive the funding once the issue is completed & confirmed by you.
- Thank you in advance for helping prioritize & fund our backlog.
<a href="https://polar.sh/strawberry-graphql/strawberry/issues/2465">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/strawberry-graphql/strawberry/issues/2465/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/strawberry-graphql/strawberry/issues/2465/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/custom_scalar.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 from dataclasses import dataclass
5 from typing import (
6 TYPE_CHECKING,
7 Any,
8 Callable,
9 Iterable,
10 Mapping,
11 NewType,
12 Optional,
13 Type,
14 TypeVar,
15 Union,
16 overload,
17 )
18
19 from strawberry.exceptions import InvalidUnionTypeError
20 from strawberry.type import StrawberryOptional, StrawberryType
21
22 from .utils.str_converters import to_camel_case
23
24 if TYPE_CHECKING:
25 from graphql import GraphQLScalarType
26
27
28 # in python 3.10+ NewType is a class
29 if sys.version_info >= (3, 10):
30 _T = TypeVar("_T", bound=Union[type, NewType])
31 else:
32 _T = TypeVar("_T", bound=type)
33
34
35 def identity(x: _T) -> _T:
36 return x
37
38
39 @dataclass
40 class ScalarDefinition(StrawberryType):
41 name: str
42 description: Optional[str]
43 specified_by_url: Optional[str]
44 serialize: Optional[Callable]
45 parse_value: Optional[Callable]
46 parse_literal: Optional[Callable]
47 directives: Iterable[object] = ()
48
49 # Optionally store the GraphQLScalarType instance so that we don't get
50 # duplicates
51 implementation: Optional[GraphQLScalarType] = None
52
53 # used for better error messages
54 _source_file: Optional[str] = None
55 _source_line: Optional[int] = None
56
57 def copy_with(
58 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
59 ) -> Union[StrawberryType, type]:
60 return super().copy_with(type_var_map) # type: ignore[safe-super]
61
62 @property
63 def is_generic(self) -> bool:
64 return False
65
66
67 class ScalarWrapper:
68 _scalar_definition: ScalarDefinition
69
70 def __init__(self, wrap: Callable[[Any], Any]):
71 self.wrap = wrap
72
73 def __call__(self, *args: str, **kwargs: Any):
74 return self.wrap(*args, **kwargs)
75
76 def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
77 if other is None:
78 # Return the correct notation when using `StrawberryUnion | None`.
79 return StrawberryOptional(of_type=self)
80
81 # Raise an error in any other case.
82 # There is Work in progress to deal with more merging cases, see:
83 # https://github.com/strawberry-graphql/strawberry/pull/1455
84 raise InvalidUnionTypeError(str(other), self.wrap)
85
86
87 def _process_scalar(
88 cls: Type[_T],
89 *,
90 name: Optional[str] = None,
91 description: Optional[str] = None,
92 specified_by_url: Optional[str] = None,
93 serialize: Optional[Callable] = None,
94 parse_value: Optional[Callable] = None,
95 parse_literal: Optional[Callable] = None,
96 directives: Iterable[object] = (),
97 ):
98 from strawberry.exceptions.handler import should_use_rich_exceptions
99
100 name = name or to_camel_case(cls.__name__)
101
102 _source_file = None
103 _source_line = None
104
105 if should_use_rich_exceptions():
106 frame = sys._getframe(3)
107
108 _source_file = frame.f_code.co_filename
109 _source_line = frame.f_lineno
110
111 wrapper = ScalarWrapper(cls)
112 wrapper._scalar_definition = ScalarDefinition(
113 name=name,
114 description=description,
115 specified_by_url=specified_by_url,
116 serialize=serialize,
117 parse_literal=parse_literal,
118 parse_value=parse_value,
119 directives=directives,
120 _source_file=_source_file,
121 _source_line=_source_line,
122 )
123
124 return wrapper
125
126
127 @overload
128 def scalar(
129 *,
130 name: Optional[str] = None,
131 description: Optional[str] = None,
132 specified_by_url: Optional[str] = None,
133 serialize: Callable = identity,
134 parse_value: Optional[Callable] = None,
135 parse_literal: Optional[Callable] = None,
136 directives: Iterable[object] = (),
137 ) -> Callable[[_T], _T]:
138 ...
139
140
141 @overload
142 def scalar(
143 cls: _T,
144 *,
145 name: Optional[str] = None,
146 description: Optional[str] = None,
147 specified_by_url: Optional[str] = None,
148 serialize: Callable = identity,
149 parse_value: Optional[Callable] = None,
150 parse_literal: Optional[Callable] = None,
151 directives: Iterable[object] = (),
152 ) -> _T:
153 ...
154
155
156 # FIXME: We are tricking pyright into thinking that we are returning the given type
157 # here or else it won't let us use any custom scalar to annotate attributes in
158 # dataclasses/types. This should be properly solved when implementing StrawberryScalar
159 def scalar(
160 cls=None,
161 *,
162 name: Optional[str] = None,
163 description: Optional[str] = None,
164 specified_by_url: Optional[str] = None,
165 serialize: Callable = identity,
166 parse_value: Optional[Callable] = None,
167 parse_literal: Optional[Callable] = None,
168 directives: Iterable[object] = (),
169 ) -> Any:
170 """Annotates a class or type as a GraphQL custom scalar.
171
172 Example usages:
173
174 >>> strawberry.scalar(
175 >>> datetime.date,
176 >>> serialize=lambda value: value.isoformat(),
177 >>> parse_value=datetime.parse_date
178 >>> )
179
180 >>> Base64Encoded = strawberry.scalar(
181 >>> NewType("Base64Encoded", bytes),
182 >>> serialize=base64.b64encode,
183 >>> parse_value=base64.b64decode
184 >>> )
185
186 >>> @strawberry.scalar(
187 >>> serialize=lambda value: ",".join(value.items),
188 >>> parse_value=lambda value: CustomList(value.split(","))
189 >>> )
190 >>> class CustomList:
191 >>> def __init__(self, items):
192 >>> self.items = items
193
194 """
195
196 if parse_value is None:
197 parse_value = cls
198
199 def wrap(cls: Type):
200 return _process_scalar(
201 cls,
202 name=name,
203 description=description,
204 specified_by_url=specified_by_url,
205 serialize=serialize,
206 parse_value=parse_value,
207 parse_literal=parse_literal,
208 directives=directives,
209 )
210
211 if cls is None:
212 return wrap
213
214 return wrap(cls)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/strawberry/custom_scalar.py b/strawberry/custom_scalar.py
--- a/strawberry/custom_scalar.py
+++ b/strawberry/custom_scalar.py
@@ -17,7 +17,7 @@
)
from strawberry.exceptions import InvalidUnionTypeError
-from strawberry.type import StrawberryOptional, StrawberryType
+from strawberry.type import StrawberryType
from .utils.str_converters import to_camel_case
@@ -76,7 +76,7 @@
def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
if other is None:
# Return the correct notation when using `StrawberryUnion | None`.
- return StrawberryOptional(of_type=self)
+ return Optional[self]
# Raise an error in any other case.
# There is Work in progress to deal with more merging cases, see:
|
{"golden_diff": "diff --git a/strawberry/custom_scalar.py b/strawberry/custom_scalar.py\n--- a/strawberry/custom_scalar.py\n+++ b/strawberry/custom_scalar.py\n@@ -17,7 +17,7 @@\n )\n \n from strawberry.exceptions import InvalidUnionTypeError\n-from strawberry.type import StrawberryOptional, StrawberryType\n+from strawberry.type import StrawberryType\n \n from .utils.str_converters import to_camel_case\n \n@@ -76,7 +76,7 @@\n def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:\n if other is None:\n # Return the correct notation when using `StrawberryUnion | None`.\n- return StrawberryOptional(of_type=self)\n+ return Optional[self]\n \n # Raise an error in any other case.\n # There is Work in progress to deal with more merging cases, see:\n", "issue": "TypeError: Forward references must evaluate to types.\nSo I am running Strawberry with Fastapi\r\n\r\n## Describe the Bug\r\n\r\nI have a schema like this:\r\n\r\n```\r\nfrom __future__ import annotations\r\nfrom enum import Enum\r\nimport strawberry\r\nimport datetime\r\nfrom strawberry.scalars import JSON\r\n\r\[email protected]\r\nclass Sex(Enum):\r\n male = \"Male\"\r\n female =\"Female\"\r\n\r\[email protected]\r\nclass PersonBase:\r\n name: JSON | None\r\n sex: Sex | None\r\n\r\[email protected]\r\nclass Client(PersonBase):\r\n firm: Firm | None = None\r\n\r\n\r\n```\r\n When I try to run the server it throws back an error:\r\n\r\n```\r\n File \"C:\\Users\\0\\Desktop\\Fastapi\\Test\\.\\app\\main.py\", line 14, in <module>\r\n from app.schema import schema\r\n File \"C:\\Users\\0\\Desktop\\Fastapi\\Test\\.\\app\\schema.py\", line 624, in <module>\r\n schema = strawberry.Schema(query=Query, mutation = Mutation)\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\strawberry\\schema\\schema.py\", line 115, in __init__\r\n self._schema = GraphQLSchema(\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\graphql\\type\\schema.py\", line 224, in __init__\r\n collect_referenced_types(query)\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\graphql\\type\\schema.py\", line 433, in collect_referenced_types\r\n collect_referenced_types(field.type)\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\graphql\\type\\schema.py\", line 433, in collect_referenced_types\r\n collect_referenced_types(field.type)\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\graphql\\type\\schema.py\", line 432, in collect_referenced_types\r\n for field in named_type.fields.values():\r\n File \"C:\\Users\\0\\AppData\\Local\\Programs\\Python\\Python310\\lib\\functools.py\", line 970, in __get__\r\n val = self.func(instance)\r\n File \"C:\\Users\\0\\Envs\\Fastapi\\lib\\site-packages\\graphql\\type\\definition.py\", line 811, in fields\r\n raise cls(f\"{self.name} fields cannot be resolved. {error}\") from error\r\nTypeError: Client fields cannot be resolved. Forward references must evaluate to types. 
Got <strawberry.type.StrawberryOptional object at 0x000002C85C5F5990>.\r\n```\r\nNote that if I change one thing like this making a required name field with a string the error is resolved\r\n\r\n```\r\[email protected]\r\nclass Client(PersonBase):\r\n firm: Firm | None = None\r\n name: str | None\r\n```\r\n\r\nBut if I put PersonBase into the model like this, it still throws an error even though the name field is required as JSON.\r\n\r\n```\r\nclass Client:\r\n firm: Firm | None = None\r\n name: JSON | None\r\n sex: Sex | None\r\n```\r\n\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): strawberry-graphql==0.139.0\r\n\r\n## Additional Context\r\n\r\nNot sure if this is an bug or I did something wrong but I find this behavior strange because the JSON and Enum are both scalars but required and they triggered a type error.\r\n\n\n<!-- POLAR PLEDGE BADGE START -->\n## Upvote & Fund\n\n- We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue.\n- We receive the funding once the issue is completed & confirmed by you.\n- Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/strawberry-graphql/strawberry/issues/2465\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/2465/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/2465/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom dataclasses import dataclass\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Iterable,\n Mapping,\n NewType,\n Optional,\n Type,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.exceptions import InvalidUnionTypeError\nfrom strawberry.type import StrawberryOptional, StrawberryType\n\nfrom .utils.str_converters import to_camel_case\n\nif TYPE_CHECKING:\n from graphql import GraphQLScalarType\n\n\n# in python 3.10+ NewType is a class\nif sys.version_info >= (3, 10):\n _T = TypeVar(\"_T\", bound=Union[type, NewType])\nelse:\n _T = TypeVar(\"_T\", bound=type)\n\n\ndef identity(x: _T) -> _T:\n return x\n\n\n@dataclass\nclass ScalarDefinition(StrawberryType):\n name: str\n description: Optional[str]\n specified_by_url: Optional[str]\n serialize: Optional[Callable]\n parse_value: Optional[Callable]\n parse_literal: Optional[Callable]\n directives: Iterable[object] = ()\n\n # Optionally store the GraphQLScalarType instance so that we don't get\n # duplicates\n implementation: Optional[GraphQLScalarType] = None\n\n # used for better error messages\n _source_file: Optional[str] = None\n _source_line: Optional[int] = None\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map) # type: ignore[safe-super]\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\nclass ScalarWrapper:\n _scalar_definition: ScalarDefinition\n\n def __init__(self, wrap: Callable[[Any], Any]):\n self.wrap = wrap\n\n def __call__(self, *args: str, **kwargs: Any):\n return self.wrap(*args, **kwargs)\n\n def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:\n if other is None:\n # Return the correct notation when using `StrawberryUnion | None`.\n return StrawberryOptional(of_type=self)\n\n # Raise an error 
in any other case.\n # There is Work in progress to deal with more merging cases, see:\n # https://github.com/strawberry-graphql/strawberry/pull/1455\n raise InvalidUnionTypeError(str(other), self.wrap)\n\n\ndef _process_scalar(\n cls: Type[_T],\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Optional[Callable] = None,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n):\n from strawberry.exceptions.handler import should_use_rich_exceptions\n\n name = name or to_camel_case(cls.__name__)\n\n _source_file = None\n _source_line = None\n\n if should_use_rich_exceptions():\n frame = sys._getframe(3)\n\n _source_file = frame.f_code.co_filename\n _source_line = frame.f_lineno\n\n wrapper = ScalarWrapper(cls)\n wrapper._scalar_definition = ScalarDefinition(\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_literal=parse_literal,\n parse_value=parse_value,\n directives=directives,\n _source_file=_source_file,\n _source_line=_source_line,\n )\n\n return wrapper\n\n\n@overload\ndef scalar(\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> Callable[[_T], _T]:\n ...\n\n\n@overload\ndef scalar(\n cls: _T,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> _T:\n ...\n\n\n# FIXME: We are tricking pyright into thinking that we are returning the given type\n# here or else it won't let us use any custom scalar to annotate attributes in\n# dataclasses/types. 
This should be properly solved when implementing StrawberryScalar\ndef scalar(\n cls=None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> Any:\n \"\"\"Annotates a class or type as a GraphQL custom scalar.\n\n Example usages:\n\n >>> strawberry.scalar(\n >>> datetime.date,\n >>> serialize=lambda value: value.isoformat(),\n >>> parse_value=datetime.parse_date\n >>> )\n\n >>> Base64Encoded = strawberry.scalar(\n >>> NewType(\"Base64Encoded\", bytes),\n >>> serialize=base64.b64encode,\n >>> parse_value=base64.b64decode\n >>> )\n\n >>> @strawberry.scalar(\n >>> serialize=lambda value: \",\".join(value.items),\n >>> parse_value=lambda value: CustomList(value.split(\",\"))\n >>> )\n >>> class CustomList:\n >>> def __init__(self, items):\n >>> self.items = items\n\n \"\"\"\n\n if parse_value is None:\n parse_value = cls\n\n def wrap(cls: Type):\n return _process_scalar(\n cls,\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_value=parse_value,\n parse_literal=parse_literal,\n directives=directives,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n", "path": "strawberry/custom_scalar.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom dataclasses import dataclass\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Iterable,\n Mapping,\n NewType,\n Optional,\n Type,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.exceptions import InvalidUnionTypeError\nfrom strawberry.type import StrawberryType\n\nfrom .utils.str_converters import to_camel_case\n\nif TYPE_CHECKING:\n from graphql import GraphQLScalarType\n\n\n# in python 3.10+ NewType is a class\nif sys.version_info >= (3, 10):\n _T = TypeVar(\"_T\", bound=Union[type, NewType])\nelse:\n _T = TypeVar(\"_T\", bound=type)\n\n\ndef identity(x: _T) -> _T:\n return x\n\n\n@dataclass\nclass ScalarDefinition(StrawberryType):\n name: str\n description: Optional[str]\n specified_by_url: Optional[str]\n serialize: Optional[Callable]\n parse_value: Optional[Callable]\n parse_literal: Optional[Callable]\n directives: Iterable[object] = ()\n\n # Optionally store the GraphQLScalarType instance so that we don't get\n # duplicates\n implementation: Optional[GraphQLScalarType] = None\n\n # used for better error messages\n _source_file: Optional[str] = None\n _source_line: Optional[int] = None\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map) # type: ignore[safe-super]\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\nclass ScalarWrapper:\n _scalar_definition: ScalarDefinition\n\n def __init__(self, wrap: Callable[[Any], Any]):\n self.wrap = wrap\n\n def __call__(self, *args: str, **kwargs: Any):\n return self.wrap(*args, **kwargs)\n\n def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:\n if other is None:\n # Return the correct notation when using `StrawberryUnion | None`.\n return Optional[self]\n\n # Raise an error in any other case.\n # There is Work in progress to deal with more merging cases, see:\n # https://github.com/strawberry-graphql/strawberry/pull/1455\n raise InvalidUnionTypeError(str(other), self.wrap)\n\n\ndef _process_scalar(\n cls: Type[_T],\n *,\n name: 
Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Optional[Callable] = None,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n):\n from strawberry.exceptions.handler import should_use_rich_exceptions\n\n name = name or to_camel_case(cls.__name__)\n\n _source_file = None\n _source_line = None\n\n if should_use_rich_exceptions():\n frame = sys._getframe(3)\n\n _source_file = frame.f_code.co_filename\n _source_line = frame.f_lineno\n\n wrapper = ScalarWrapper(cls)\n wrapper._scalar_definition = ScalarDefinition(\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_literal=parse_literal,\n parse_value=parse_value,\n directives=directives,\n _source_file=_source_file,\n _source_line=_source_line,\n )\n\n return wrapper\n\n\n@overload\ndef scalar(\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> Callable[[_T], _T]:\n ...\n\n\n@overload\ndef scalar(\n cls: _T,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> _T:\n ...\n\n\n# FIXME: We are tricking pyright into thinking that we are returning the given type\n# here or else it won't let us use any custom scalar to annotate attributes in\n# dataclasses/types. This should be properly solved when implementing StrawberryScalar\ndef scalar(\n cls=None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n directives: Iterable[object] = (),\n) -> Any:\n \"\"\"Annotates a class or type as a GraphQL custom scalar.\n\n Example usages:\n\n >>> strawberry.scalar(\n >>> datetime.date,\n >>> serialize=lambda value: value.isoformat(),\n >>> parse_value=datetime.parse_date\n >>> )\n\n >>> Base64Encoded = strawberry.scalar(\n >>> NewType(\"Base64Encoded\", bytes),\n >>> serialize=base64.b64encode,\n >>> parse_value=base64.b64decode\n >>> )\n\n >>> @strawberry.scalar(\n >>> serialize=lambda value: \",\".join(value.items),\n >>> parse_value=lambda value: CustomList(value.split(\",\"))\n >>> )\n >>> class CustomList:\n >>> def __init__(self, items):\n >>> self.items = items\n\n \"\"\"\n\n if parse_value is None:\n parse_value = cls\n\n def wrap(cls: Type):\n return _process_scalar(\n cls,\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_value=parse_value,\n parse_literal=parse_literal,\n directives=directives,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n", "path": "strawberry/custom_scalar.py"}]}
| 3,196 | 190 |
gh_patches_debug_8675
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-740
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong link for polygon help button
Dear all,
The help button for the polygon dialog redirects to the wrong Web page:
http://www.psychopy.org/builder/components/shape.html
I guess it should now be:
http://www.psychopy.org/builder/components/polygon.html
I just updated to v1.81.02, so I guess the report is still relevant. I didn't check all the other buttons; I don't know which labels have been changed recently.
To be sure, I still included the output of sysInfo.py below.
Best,
Axel
Willkommen auf PsychoPy2!
v1.81.02
##### Running: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/demos/coder/sysInfo.py
Paths to files on the system:
userPrefsFile: /Users/akohler/.psychopy2/userPrefs.cfg
appDataFile: /Users/akohler/.psychopy2/appData.cfg
demos: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/demos
appFile: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/app/PsychoPy.py
System info:
Darwin-12.5.0-x86_64-i386-32bit
OS X 10.8.5 running on x86_64
Python info
/Applications/PsychoPy2.app/Contents/MacOS/python
2.7.3 (v2.7.3:70274d53c1dd, Apr 9 2012, 20:32:06)
[GCC 4.0.1 (Apple Inc. build 5493)]
numpy 1.7.1
scipy 0.12.0
matplotlib 1.3.0
pyglet 1.1.4
pyo 0.6.6
PsychoPy 1.81.02
have shaders: True
OpenGL info:
vendor: Intel Inc.
rendering engine: Intel HD Graphics 3000 OpenGL Engine
OpenGL version: 2.1 INTEL-8.16.80
(Selected) Extensions:
True GL_ARB_multitexture
True GL_EXT_framebuffer_object
True GL_ARB_fragment_program
True GL_ARB_shader_objects
True GL_ARB_vertex_shader
True GL_ARB_texture_non_power_of_two
True GL_ARB_texture_float
False GL_STEREO
max vertices in vertex array: 1048575
1.7780 WARNING Movie2 stim could not be imported and won't be available
6.5262 WARNING Monitor specification not found. Creating a temporary one...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/builder/components/polygon.py`
Content:
```
1 # Part of the PsychoPy library
2 # Copyright (C) 2014 Jonathan Peirce
3 # Distributed under the terms of the GNU General Public License (GPL).
4
5 from _visual import * #to get the template visual component
6 from os import path
7 from psychopy.app.builder.components import getInitVals
8
9 thisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path
10 iconFile = path.join(thisFolder,'polygon.png')
11 tooltip = _translate('Polygon: any regular polygon (line, triangle, square...circle)')
12
13 # only use _localized values for label values, nothing functional:
14 _localized = {'nVertices': _translate('Num. vertices'),
15 'fillColorSpace': _translate('Fill color-space'), 'fillColor': _translate('Fill color'),
16 'lineColorSpace': _translate('Line color-space'), 'lineColor': _translate('Line color'),
17 'lineWidth': _translate('Line width'),
18 'interpolate': _translate('Interpolate'), 'size': _translate("Size [w,h]")
19 }
20
21 class PolygonComponent(VisualComponent):
22 """A class for presenting grating stimuli"""
23 def __init__(self, exp, parentName, name='polygon', interpolate='linear',
24 units='from exp settings',
25 lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1,
26 fillColor='$[1,1,1]', fillColorSpace='rgb',
27 nVertices=4,
28 pos=[0,0], size=[0.5,0.5], ori=0,
29 startType='time (s)', startVal=0.0,
30 stopType='duration (s)', stopVal=1.0,
31 startEstim='', durationEstim=''):
32 #initialise main parameters from base stimulus
33 super(PolygonComponent, self).__init__(exp,parentName,name=name, units=units,
34 pos=pos, size=size, ori=ori,
35 startType=startType, startVal=startVal,
36 stopType=stopType, stopVal=stopVal,
37 startEstim=startEstim, durationEstim=durationEstim)
38 self.type='Polygon'
39 self.url="http://www.psychopy.org/builder/components/shape.html"
40 self.exp.requirePsychopyLibs(['visual'])
41 self.order=['nVertices']
42 #params
43 self.params['nVertices']=Param(nVertices, valType='int',
44 updates='constant', allowedUpdates=['constant'],
45 hint=_translate("How many vertices? 2=line, 3=triangle... (90 approximates a circle)"),
46 label=_localized['nVertices'])
47 self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],
48 updates='constant',
49 hint=_translate("Choice of color space for the fill color (rgb, dkl, lms, hsv)"),
50 label=_localized['fillColorSpace'], categ='Advanced')
51 self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[],
52 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],
53 hint=_translate("Fill color of this shape; Right-click to bring up a color-picker (rgb only)"),
54 label=_localized['fillColor'], categ='Advanced')
55 self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],
56 updates='constant',
57 hint=_translate("Choice of color space for the fill color (rgb, dkl, lms, hsv)"),
58 label=_localized['lineColorSpace'], categ='Advanced')
59 self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[],
60 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],
61 hint=_translate("Line color of this shape; Right-click to bring up a color-picker (rgb only)"),
62 label=_localized['lineColor'], categ='Advanced')
63 self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[],
64 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],
65 hint=_translate("Width of the shape's line (always in pixels - this does NOT use 'units')"),
66 label=_localized['lineWidth'])
67 self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'],
68 updates='constant', allowedUpdates=[],
69 hint=_translate("How should the image be interpolated if/when rescaled"),
70 label=_localized['interpolate'], categ='Advanced')
71 self.params['size']=Param(size, valType='code', allowedTypes=[],
72 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],
73 hint=_translate("Size of this stimulus [w,h]. Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected,\n but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! "),
74 label=_localized['size'])
75 del self.params['color']
76 del self.params['colorSpace']
77
78 def writeInitCode(self,buff):
79 #do we need units code?
80 if self.params['units'].val=='from exp settings': unitsStr=""
81 else: unitsStr="units=%(units)s, " %self.params
82 inits = getInitVals(self.params)#replaces variable params with defaults
83 if inits['size'].val=='1.0':
84 inits['size'].val = '[1.0, 1.0]'
85 if self.params['nVertices'].val == '2':
86 buff.writeIndented("%s = visual.Line(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr))
87 buff.writeIndented(" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\n" %(inits) )
88 elif self.params['nVertices'].val == '3':
89 buff.writeIndented("%s = visual.ShapeStim(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr))
90 buff.writeIndented(" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\n" %(inits) )
91 elif self.params['nVertices'].val == '4':
92 buff.writeIndented("%s = visual.Rect(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr))
93 buff.writeIndented(" width=%(size)s[0], height=%(size)s[1],\n" %(inits) )
94 else:
95 buff.writeIndented("%s = visual.Polygon(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr))
96 buff.writeIndented(" edges = %s," % str(inits['nVertices'].val))
97 buff.writeIndented(" size=%(size)s,\n" %(inits) )
98 buff.writeIndented(" ori=%(ori)s, pos=%(pos)s,\n" %(inits) )
99 buff.writeIndented(" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\n" %(inits) )
100 buff.writeIndented(" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\n" %(inits) )
101 buff.writeIndented(" opacity=%(opacity)s," %(inits) )
102 if self.params['interpolate'].val=='linear':
103 buff.write("interpolate=True)\n")
104 else: buff.write("interpolate=False)\n")
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/psychopy/app/builder/components/polygon.py b/psychopy/app/builder/components/polygon.py
--- a/psychopy/app/builder/components/polygon.py
+++ b/psychopy/app/builder/components/polygon.py
@@ -36,7 +36,7 @@
stopType=stopType, stopVal=stopVal,
startEstim=startEstim, durationEstim=durationEstim)
self.type='Polygon'
- self.url="http://www.psychopy.org/builder/components/shape.html"
+ self.url="http://www.psychopy.org/builder/components/polygon.html"
self.exp.requirePsychopyLibs(['visual'])
self.order=['nVertices']
#params
|
{"golden_diff": "diff --git a/psychopy/app/builder/components/polygon.py b/psychopy/app/builder/components/polygon.py\n--- a/psychopy/app/builder/components/polygon.py\n+++ b/psychopy/app/builder/components/polygon.py\n@@ -36,7 +36,7 @@\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Polygon'\n- self.url=\"http://www.psychopy.org/builder/components/shape.html\"\n+ self.url=\"http://www.psychopy.org/builder/components/polygon.html\"\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n", "issue": "Wrong link for polygon help button\nDear all,\n\nThe help button for the polygon dialog redirects to the wrong Web page:\nhttp://www.psychopy.org/builder/components/shape.html\n\nI guess it should now be:\nhttp://www.psychopy.org/builder/components/polygon.html\n\nI just updated to v1.81.02, so I guess the report is still relevant. I didn't check all the other buttons; I don't know which labels have been changed recently.\n\nTo be sure, I still included the output of sysInfo.py below.\n\nBest,\n\nAxel\n\nWillkommen auf PsychoPy2!\nv1.81.02\n\u001b\n##### Running: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/demos/coder/sysInfo.py\n\nPaths to files on the system:\n userPrefsFile: /Users/akohler/.psychopy2/userPrefs.cfg\n appDataFile: /Users/akohler/.psychopy2/appData.cfg\n demos: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/demos\n appFile: /Applications/PsychoPy2.app/Contents/Resources/lib/python2.7/psychopy/app/PsychoPy.py\n\nSystem info:\nDarwin-12.5.0-x86_64-i386-32bit\nOS X 10.8.5 running on x86_64\n\nPython info\n/Applications/PsychoPy2.app/Contents/MacOS/python\n2.7.3 (v2.7.3:70274d53c1dd, Apr 9 2012, 20:32:06) \n[GCC 4.0.1 (Apple Inc. build 5493)]\nnumpy 1.7.1\nscipy 0.12.0\nmatplotlib 1.3.0\npyglet 1.1.4\npyo 0.6.6\n\nPsychoPy 1.81.02\nhave shaders: True\n\nOpenGL info:\nvendor: Intel Inc.\nrendering engine: Intel HD Graphics 3000 OpenGL Engine\nOpenGL version: 2.1 INTEL-8.16.80\n(Selected) Extensions:\n True GL_ARB_multitexture\n True GL_EXT_framebuffer_object\n True GL_ARB_fragment_program\n True GL_ARB_shader_objects\n True GL_ARB_vertex_shader\n True GL_ARB_texture_non_power_of_two\n True GL_ARB_texture_float\n False GL_STEREO\n max vertices in vertex array: 1048575\n1.7780 WARNING Movie2 stim could not be imported and won't be available\n6.5262 WARNING Monitor specification not found. Creating a temporary one...\n\n", "before_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom _visual import * #to get the template visual component\nfrom os import path\nfrom psychopy.app.builder.components import getInitVals\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'polygon.png')\ntooltip = _translate('Polygon: any regular polygon (line, triangle, square...circle)')\n\n# only use _localized values for label values, nothing functional:\n_localized = {'nVertices': _translate('Num. 
vertices'),\n 'fillColorSpace': _translate('Fill color-space'), 'fillColor': _translate('Fill color'),\n 'lineColorSpace': _translate('Line color-space'), 'lineColor': _translate('Line color'),\n 'lineWidth': _translate('Line width'),\n 'interpolate': _translate('Interpolate'), 'size': _translate(\"Size [w,h]\")\n }\n\nclass PolygonComponent(VisualComponent):\n \"\"\"A class for presenting grating stimuli\"\"\"\n def __init__(self, exp, parentName, name='polygon', interpolate='linear',\n units='from exp settings',\n lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1,\n fillColor='$[1,1,1]', fillColorSpace='rgb',\n nVertices=4,\n pos=[0,0], size=[0.5,0.5], ori=0,\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n #initialise main parameters from base stimulus\n super(PolygonComponent, self).__init__(exp,parentName,name=name, units=units,\n pos=pos, size=size, ori=ori,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Polygon'\n self.url=\"http://www.psychopy.org/builder/components/shape.html\"\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n self.params['nVertices']=Param(nVertices, valType='int',\n updates='constant', allowedUpdates=['constant'],\n hint=_translate(\"How many vertices? 2=line, 3=triangle... (90 approximates a circle)\"),\n label=_localized['nVertices'])\n self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],\n updates='constant',\n hint=_translate(\"Choice of color space for the fill color (rgb, dkl, lms, hsv)\"),\n label=_localized['fillColorSpace'], categ='Advanced')\n self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Fill color of this shape; Right-click to bring up a color-picker (rgb only)\"),\n label=_localized['fillColor'], categ='Advanced')\n self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],\n updates='constant',\n hint=_translate(\"Choice of color space for the fill color (rgb, dkl, lms, hsv)\"),\n label=_localized['lineColorSpace'], categ='Advanced')\n self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Line color of this shape; Right-click to bring up a color-picker (rgb only)\"),\n label=_localized['lineColor'], categ='Advanced')\n self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Width of the shape's line (always in pixels - this does NOT use 'units')\"),\n label=_localized['lineWidth'])\n self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'],\n updates='constant', allowedUpdates=[],\n hint=_translate(\"How should the image be interpolated if/when rescaled\"),\n label=_localized['interpolate'], categ='Advanced')\n self.params['size']=Param(size, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Size of this stimulus [w,h]. 
Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected,\\n but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! \"),\n label=_localized['size'])\n del self.params['color']\n del self.params['colorSpace']\n\n def writeInitCode(self,buff):\n #do we need units code?\n if self.params['units'].val=='from exp settings': unitsStr=\"\"\n else: unitsStr=\"units=%(units)s, \" %self.params\n inits = getInitVals(self.params)#replaces variable params with defaults\n if inits['size'].val=='1.0':\n inits['size'].val = '[1.0, 1.0]'\n if self.params['nVertices'].val == '2':\n buff.writeIndented(\"%s = visual.Line(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\\n\" %(inits) )\n elif self.params['nVertices'].val == '3':\n buff.writeIndented(\"%s = visual.ShapeStim(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\\n\" %(inits) )\n elif self.params['nVertices'].val == '4':\n buff.writeIndented(\"%s = visual.Rect(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" width=%(size)s[0], height=%(size)s[1],\\n\" %(inits) )\n else:\n buff.writeIndented(\"%s = visual.Polygon(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" edges = %s,\" % str(inits['nVertices'].val))\n buff.writeIndented(\" size=%(size)s,\\n\" %(inits) )\n buff.writeIndented(\" ori=%(ori)s, pos=%(pos)s,\\n\" %(inits) )\n buff.writeIndented(\" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" opacity=%(opacity)s,\" %(inits) )\n if self.params['interpolate'].val=='linear':\n buff.write(\"interpolate=True)\\n\")\n else: buff.write(\"interpolate=False)\\n\")\n", "path": "psychopy/app/builder/components/polygon.py"}], "after_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom _visual import * #to get the template visual component\nfrom os import path\nfrom psychopy.app.builder.components import getInitVals\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'polygon.png')\ntooltip = _translate('Polygon: any regular polygon (line, triangle, square...circle)')\n\n# only use _localized values for label values, nothing functional:\n_localized = {'nVertices': _translate('Num. 
vertices'),\n 'fillColorSpace': _translate('Fill color-space'), 'fillColor': _translate('Fill color'),\n 'lineColorSpace': _translate('Line color-space'), 'lineColor': _translate('Line color'),\n 'lineWidth': _translate('Line width'),\n 'interpolate': _translate('Interpolate'), 'size': _translate(\"Size [w,h]\")\n }\n\nclass PolygonComponent(VisualComponent):\n \"\"\"A class for presenting grating stimuli\"\"\"\n def __init__(self, exp, parentName, name='polygon', interpolate='linear',\n units='from exp settings',\n lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1,\n fillColor='$[1,1,1]', fillColorSpace='rgb',\n nVertices=4,\n pos=[0,0], size=[0.5,0.5], ori=0,\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n #initialise main parameters from base stimulus\n super(PolygonComponent, self).__init__(exp,parentName,name=name, units=units,\n pos=pos, size=size, ori=ori,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Polygon'\n self.url=\"http://www.psychopy.org/builder/components/polygon.html\"\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n self.params['nVertices']=Param(nVertices, valType='int',\n updates='constant', allowedUpdates=['constant'],\n hint=_translate(\"How many vertices? 2=line, 3=triangle... (90 approximates a circle)\"),\n label=_localized['nVertices'])\n self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],\n updates='constant',\n hint=_translate(\"Choice of color space for the fill color (rgb, dkl, lms, hsv)\"),\n label=_localized['fillColorSpace'], categ='Advanced')\n self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Fill color of this shape; Right-click to bring up a color-picker (rgb only)\"),\n label=_localized['fillColor'], categ='Advanced')\n self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms','hsv'],\n updates='constant',\n hint=_translate(\"Choice of color space for the fill color (rgb, dkl, lms, hsv)\"),\n label=_localized['lineColorSpace'], categ='Advanced')\n self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Line color of this shape; Right-click to bring up a color-picker (rgb only)\"),\n label=_localized['lineColor'], categ='Advanced')\n self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Width of the shape's line (always in pixels - this does NOT use 'units')\"),\n label=_localized['lineWidth'])\n self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'],\n updates='constant', allowedUpdates=[],\n hint=_translate(\"How should the image be interpolated if/when rescaled\"),\n label=_localized['interpolate'], categ='Advanced')\n self.params['size']=Param(size, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=_translate(\"Size of this stimulus [w,h]. 
Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected,\\n but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! \"),\n label=_localized['size'])\n del self.params['color']\n del self.params['colorSpace']\n\n def writeInitCode(self,buff):\n #do we need units code?\n if self.params['units'].val=='from exp settings': unitsStr=\"\"\n else: unitsStr=\"units=%(units)s, \" %self.params\n inits = getInitVals(self.params)#replaces variable params with defaults\n if inits['size'].val=='1.0':\n inits['size'].val = '[1.0, 1.0]'\n if self.params['nVertices'].val == '2':\n buff.writeIndented(\"%s = visual.Line(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\\n\" %(inits) )\n elif self.params['nVertices'].val == '3':\n buff.writeIndented(\"%s = visual.ShapeStim(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\\n\" %(inits) )\n elif self.params['nVertices'].val == '4':\n buff.writeIndented(\"%s = visual.Rect(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" width=%(size)s[0], height=%(size)s[1],\\n\" %(inits) )\n else:\n buff.writeIndented(\"%s = visual.Polygon(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" edges = %s,\" % str(inits['nVertices'].val))\n buff.writeIndented(\" size=%(size)s,\\n\" %(inits) )\n buff.writeIndented(\" ori=%(ori)s, pos=%(pos)s,\\n\" %(inits) )\n buff.writeIndented(\" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" opacity=%(opacity)s,\" %(inits) )\n if self.params['interpolate'].val=='linear':\n buff.write(\"interpolate=True)\\n\")\n else: buff.write(\"interpolate=False)\\n\")\n", "path": "psychopy/app/builder/components/polygon.py"}]}
| 2,823 | 159 |
gh_patches_debug_18677
|
rasdani/github-patches
|
git_diff
|
canonical__snapcraft-4758
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Local key assets are not used in package-repositories for core24
### Bug Description
Local key assets are not used in package-repositories for core24 (only the keyserver is supported)
### To Reproduce
.
### Environment
.
### snapcraft.yaml
```shell
.
```
### Relevant log output
```shell
.
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snapcraft/services/lifecycle.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright 2024 Canonical Ltd.
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 """Snapcraft Lifecycle Service."""
17
18 import copy
19 import json
20 import os
21 from datetime import datetime
22 from pathlib import Path
23 from typing import Any, cast
24
25 from craft_application import AppMetadata, LifecycleService, ServiceFactory
26 from craft_application.models import BuildInfo
27 from craft_parts import ProjectInfo, StepInfo, callbacks
28 from craft_parts.packages import Repository as Repo
29 from overrides import overrides
30
31 from snapcraft import __version__, errors, models, os_release, parts, utils
32
33
34 class Lifecycle(LifecycleService):
35 """Snapcraft specialization of the Lifecycle Service."""
36
37 def __init__( # noqa: PLR0913 (too many arguments)
38 self,
39 app: AppMetadata,
40 services: ServiceFactory,
41 *,
42 project: models.Project,
43 work_dir: Path | str,
44 cache_dir: Path | str,
45 build_plan: list[BuildInfo],
46 **lifecycle_kwargs: Any, # noqa: ANN401 - eventually used in an Any
47 ) -> None:
48 super().__init__(
49 app,
50 services,
51 project=project,
52 work_dir=work_dir,
53 cache_dir=cache_dir,
54 build_plan=build_plan,
55 **lifecycle_kwargs,
56 )
57 self._start_time = datetime.now()
58 self._manifest: models.Manifest
59
60 @overrides
61 def setup(self) -> None:
62 project = cast(models.Project, self._project)
63
64 if project.package_repositories:
65 # Note: we unfortunately need to handle missing gpg/dirmngr binaries
66 # ourselves here, as this situation happens in Launchpad (where
67 # builds are executed destructively).
68 required_packages = ["gpg", "dirmngr"]
69 if any(p for p in required_packages if not Repo.is_package_installed(p)):
70 Repo.install_packages(required_packages, refresh_package_cache=False)
71
72 # Have the lifecycle install the base snap, and look into it when
73 # determining the package cutoff.
74 self._manager_kwargs.update(
75 base=project.get_effective_base(),
76 extra_build_snaps=project.get_extra_build_snaps(),
77 confinement=project.confinement,
78 project_base=project.base or "",
79 )
80 callbacks.register_prologue(parts.set_global_environment)
81 callbacks.register_pre_step(parts.set_step_environment)
82 super().setup()
83
84 @overrides
85 def post_prime(self, step_info: StepInfo) -> bool:
86 """Run post-prime parts steps for Snapcraft."""
87 return parts.patch_elf(step_info)
88
89 def get_prime_dir(self, component: str | None = None) -> Path:
90 """Get the prime directory path for the default prime dir or a component.
91
92 :param component: Name of the component to get the prime directory for.
93
94 :returns: The default prime directory or a component's prime directory.
95
96 :raises SnapcraftError: If the component does not exist.
97 """
98 try:
99 return self.prime_dirs[component]
100 except KeyError as err:
101 raise errors.SnapcraftError(
102 f"Could not get prime directory for component {component!r} "
103 "because it does not exist."
104 ) from err
105
106 @property
107 def prime_dirs(self) -> dict[str | None, Path]:
108 """Return a mapping of component names to prime directories.
109
110 'None' maps to the default prime directory.
111 """
112 return get_prime_dirs_from_project(self._lcm.project_info)
113
114 def generate_manifest(self) -> models.Manifest:
115 """Create and populate the manifest file."""
116 primed_stage_packages: set[str] = set()
117
118 image_information = os.getenv("SNAPCRAFT_IMAGE_INFO", "{}")
119 try:
120 image_info = json.loads(image_information)
121 except json.decoder.JSONDecodeError as err:
122 raise errors.SnapcraftError(
123 f"Image information decode error at {err.lineno}:{err.colno}: "
124 f"{err.doc!r}: {err.msg}"
125 ) from err
126
127 project = cast(models.Project, self._project)
128
129 project_parts = copy.deepcopy(project.parts)
130 for name, part in project_parts.items():
131 assets = self.get_pull_assets(part_name=name)
132 if assets:
133 part["stage-packages"] = assets.get("stage-packages", []) or []
134 for key in ("stage", "prime", "stage-packages", "build-packages"):
135 part.setdefault(key, [])
136
137 stage_packages = self.get_primed_stage_packages(part_name=name)
138 if stage_packages:
139 primed_stage_packages |= set(stage_packages)
140
141 osrel = os_release.OsRelease()
142 version = utils.process_version(project.version)
143 host_arch = utils.get_host_architecture()
144 build_for = self._build_plan[0].build_for if self._build_plan else host_arch
145
146 return models.Manifest(
147 # Snapcraft annotations
148 snapcraft_version=__version__,
149 snapcraft_started_at=self._start_time.isoformat("T") + "Z",
150 snapcraft_os_release_id=osrel.name().lower(),
151 snapcraft_os_release_version_id=osrel.version_id().lower(),
152 # Project fields
153 name=project.name,
154 version=version,
155 summary=str(project.summary),
156 description=str(project.description),
157 base=project.base,
158 grade=project.grade or "stable",
159 confinement=project.confinement,
160 apps=project.apps,
161 parts=project_parts,
162 # Architecture
163 architectures=[build_for],
164 # Image info
165 image_info=image_info,
166 # Build environment
167 build_packages=[],
168 build_snaps=[],
169 primed_stage_packages=sorted(primed_stage_packages),
170 )
171
172
173 def get_prime_dirs_from_project(project_info: ProjectInfo) -> dict[str | None, Path]:
174 """Get a mapping of component names to prime directories from a ProjectInfo.
175
176 'None' maps to the default prime directory.
177
178 :param project_info: The ProjectInfo to get the prime directory mapping from.
179 """
180 partition_prime_dirs = project_info.prime_dirs
181 component_prime_dirs: dict[str | None, Path] = {None: project_info.prime_dir}
182
183 # strip 'component/' prefix so that the component name is the key
184 for partition, prime_dir in partition_prime_dirs.items():
185 if partition and partition.startswith("component/"):
186 component = partition.split("/", 1)[1]
187 component_prime_dirs[component] = prime_dir
188
189 return component_prime_dirs
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snapcraft/services/lifecycle.py b/snapcraft/services/lifecycle.py
--- a/snapcraft/services/lifecycle.py
+++ b/snapcraft/services/lifecycle.py
@@ -29,6 +29,7 @@
from overrides import overrides
from snapcraft import __version__, errors, models, os_release, parts, utils
+from snapcraft.parts.yaml_utils import get_snap_project
class Lifecycle(LifecycleService):
@@ -169,6 +170,15 @@
primed_stage_packages=sorted(primed_stage_packages),
)
+ @overrides
+ def _get_local_keys_path(self) -> Path | None:
+ snap_project = get_snap_project()
+ keys_dir = snap_project.assets_dir / "keys"
+ if keys_dir.is_dir():
+ return keys_dir
+
+ return None
+
def get_prime_dirs_from_project(project_info: ProjectInfo) -> dict[str | None, Path]:
"""Get a mapping of component names to prime directories from a ProjectInfo.
|
{"golden_diff": "diff --git a/snapcraft/services/lifecycle.py b/snapcraft/services/lifecycle.py\n--- a/snapcraft/services/lifecycle.py\n+++ b/snapcraft/services/lifecycle.py\n@@ -29,6 +29,7 @@\n from overrides import overrides\n \n from snapcraft import __version__, errors, models, os_release, parts, utils\n+from snapcraft.parts.yaml_utils import get_snap_project\n \n \n class Lifecycle(LifecycleService):\n@@ -169,6 +170,15 @@\n primed_stage_packages=sorted(primed_stage_packages),\n )\n \n+ @overrides\n+ def _get_local_keys_path(self) -> Path | None:\n+ snap_project = get_snap_project()\n+ keys_dir = snap_project.assets_dir / \"keys\"\n+ if keys_dir.is_dir():\n+ return keys_dir\n+\n+ return None\n+\n \n def get_prime_dirs_from_project(project_info: ProjectInfo) -> dict[str | None, Path]:\n \"\"\"Get a mapping of component names to prime directories from a ProjectInfo.\n", "issue": "Local key assets are not used in package-repositories for core24\n### Bug Description\n\nLocal key assets are not used in package-repositories for core24 (only the keyserver is supported)\n\n### To Reproduce\n\n.\n\n### Environment\n\n.\n\n### snapcraft.yaml\n\n```shell\n.\n```\n\n\n### Relevant log output\n\n```shell\n.\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2024 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Snapcraft Lifecycle Service.\"\"\"\n\nimport copy\nimport json\nimport os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any, cast\n\nfrom craft_application import AppMetadata, LifecycleService, ServiceFactory\nfrom craft_application.models import BuildInfo\nfrom craft_parts import ProjectInfo, StepInfo, callbacks\nfrom craft_parts.packages import Repository as Repo\nfrom overrides import overrides\n\nfrom snapcraft import __version__, errors, models, os_release, parts, utils\n\n\nclass Lifecycle(LifecycleService):\n \"\"\"Snapcraft specialization of the Lifecycle Service.\"\"\"\n\n def __init__( # noqa: PLR0913 (too many arguments)\n self,\n app: AppMetadata,\n services: ServiceFactory,\n *,\n project: models.Project,\n work_dir: Path | str,\n cache_dir: Path | str,\n build_plan: list[BuildInfo],\n **lifecycle_kwargs: Any, # noqa: ANN401 - eventually used in an Any\n ) -> None:\n super().__init__(\n app,\n services,\n project=project,\n work_dir=work_dir,\n cache_dir=cache_dir,\n build_plan=build_plan,\n **lifecycle_kwargs,\n )\n self._start_time = datetime.now()\n self._manifest: models.Manifest\n\n @overrides\n def setup(self) -> None:\n project = cast(models.Project, self._project)\n\n if project.package_repositories:\n # Note: we unfortunately need to handle missing gpg/dirmngr binaries\n # ourselves here, as this situation happens in Launchpad (where\n # builds are executed destructively).\n required_packages = [\"gpg\", \"dirmngr\"]\n if any(p for p in required_packages if not Repo.is_package_installed(p)):\n Repo.install_packages(required_packages, refresh_package_cache=False)\n\n # Have the lifecycle install the base snap, and look into it when\n # determining the package cutoff.\n self._manager_kwargs.update(\n base=project.get_effective_base(),\n extra_build_snaps=project.get_extra_build_snaps(),\n confinement=project.confinement,\n project_base=project.base or \"\",\n )\n callbacks.register_prologue(parts.set_global_environment)\n callbacks.register_pre_step(parts.set_step_environment)\n super().setup()\n\n @overrides\n def post_prime(self, step_info: StepInfo) -> bool:\n \"\"\"Run post-prime parts steps for Snapcraft.\"\"\"\n return parts.patch_elf(step_info)\n\n def get_prime_dir(self, component: str | None = None) -> Path:\n \"\"\"Get the prime directory path for the default prime dir or a component.\n\n :param component: Name of the component to get the prime directory for.\n\n :returns: The default prime directory or a component's prime directory.\n\n :raises SnapcraftError: If the component does not exist.\n \"\"\"\n try:\n return self.prime_dirs[component]\n except KeyError as err:\n raise errors.SnapcraftError(\n f\"Could not get prime directory for component {component!r} \"\n \"because it does not exist.\"\n ) from err\n\n @property\n def prime_dirs(self) -> dict[str | None, Path]:\n \"\"\"Return a mapping of component names to prime directories.\n\n 'None' maps to the default prime directory.\n \"\"\"\n return get_prime_dirs_from_project(self._lcm.project_info)\n\n def generate_manifest(self) -> models.Manifest:\n \"\"\"Create and populate the manifest file.\"\"\"\n primed_stage_packages: set[str] = set()\n\n image_information = os.getenv(\"SNAPCRAFT_IMAGE_INFO\", \"{}\")\n try:\n image_info = json.loads(image_information)\n except json.decoder.JSONDecodeError as err:\n raise errors.SnapcraftError(\n f\"Image information decode error at {err.lineno}:{err.colno}: \"\n f\"{err.doc!r}: 
{err.msg}\"\n ) from err\n\n project = cast(models.Project, self._project)\n\n project_parts = copy.deepcopy(project.parts)\n for name, part in project_parts.items():\n assets = self.get_pull_assets(part_name=name)\n if assets:\n part[\"stage-packages\"] = assets.get(\"stage-packages\", []) or []\n for key in (\"stage\", \"prime\", \"stage-packages\", \"build-packages\"):\n part.setdefault(key, [])\n\n stage_packages = self.get_primed_stage_packages(part_name=name)\n if stage_packages:\n primed_stage_packages |= set(stage_packages)\n\n osrel = os_release.OsRelease()\n version = utils.process_version(project.version)\n host_arch = utils.get_host_architecture()\n build_for = self._build_plan[0].build_for if self._build_plan else host_arch\n\n return models.Manifest(\n # Snapcraft annotations\n snapcraft_version=__version__,\n snapcraft_started_at=self._start_time.isoformat(\"T\") + \"Z\",\n snapcraft_os_release_id=osrel.name().lower(),\n snapcraft_os_release_version_id=osrel.version_id().lower(),\n # Project fields\n name=project.name,\n version=version,\n summary=str(project.summary),\n description=str(project.description),\n base=project.base,\n grade=project.grade or \"stable\",\n confinement=project.confinement,\n apps=project.apps,\n parts=project_parts,\n # Architecture\n architectures=[build_for],\n # Image info\n image_info=image_info,\n # Build environment\n build_packages=[],\n build_snaps=[],\n primed_stage_packages=sorted(primed_stage_packages),\n )\n\n\ndef get_prime_dirs_from_project(project_info: ProjectInfo) -> dict[str | None, Path]:\n \"\"\"Get a mapping of component names to prime directories from a ProjectInfo.\n\n 'None' maps to the default prime directory.\n\n :param project_info: The ProjectInfo to get the prime directory mapping from.\n \"\"\"\n partition_prime_dirs = project_info.prime_dirs\n component_prime_dirs: dict[str | None, Path] = {None: project_info.prime_dir}\n\n # strip 'component/' prefix so that the component name is the key\n for partition, prime_dir in partition_prime_dirs.items():\n if partition and partition.startswith(\"component/\"):\n component = partition.split(\"/\", 1)[1]\n component_prime_dirs[component] = prime_dir\n\n return component_prime_dirs\n", "path": "snapcraft/services/lifecycle.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2024 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Snapcraft Lifecycle Service.\"\"\"\n\nimport copy\nimport json\nimport os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any, cast\n\nfrom craft_application import AppMetadata, LifecycleService, ServiceFactory\nfrom craft_application.models import BuildInfo\nfrom craft_parts import ProjectInfo, StepInfo, callbacks\nfrom craft_parts.packages import Repository as Repo\nfrom overrides import overrides\n\nfrom snapcraft import __version__, errors, models, os_release, parts, utils\nfrom snapcraft.parts.yaml_utils import get_snap_project\n\n\nclass Lifecycle(LifecycleService):\n \"\"\"Snapcraft specialization of the Lifecycle Service.\"\"\"\n\n def __init__( # noqa: PLR0913 (too many arguments)\n self,\n app: AppMetadata,\n services: ServiceFactory,\n *,\n project: models.Project,\n work_dir: Path | str,\n cache_dir: Path | str,\n build_plan: list[BuildInfo],\n **lifecycle_kwargs: Any, # noqa: ANN401 - eventually used in an Any\n ) -> None:\n super().__init__(\n app,\n services,\n project=project,\n work_dir=work_dir,\n cache_dir=cache_dir,\n build_plan=build_plan,\n **lifecycle_kwargs,\n )\n self._start_time = datetime.now()\n self._manifest: models.Manifest\n\n @overrides\n def setup(self) -> None:\n project = cast(models.Project, self._project)\n\n if project.package_repositories:\n # Note: we unfortunately need to handle missing gpg/dirmngr binaries\n # ourselves here, as this situation happens in Launchpad (where\n # builds are executed destructively).\n required_packages = [\"gpg\", \"dirmngr\"]\n if any(p for p in required_packages if not Repo.is_package_installed(p)):\n Repo.install_packages(required_packages, refresh_package_cache=False)\n\n # Have the lifecycle install the base snap, and look into it when\n # determining the package cutoff.\n self._manager_kwargs.update(\n base=project.get_effective_base(),\n extra_build_snaps=project.get_extra_build_snaps(),\n confinement=project.confinement,\n project_base=project.base or \"\",\n )\n callbacks.register_prologue(parts.set_global_environment)\n callbacks.register_pre_step(parts.set_step_environment)\n super().setup()\n\n @overrides\n def post_prime(self, step_info: StepInfo) -> bool:\n \"\"\"Run post-prime parts steps for Snapcraft.\"\"\"\n return parts.patch_elf(step_info)\n\n def get_prime_dir(self, component: str | None = None) -> Path:\n \"\"\"Get the prime directory path for the default prime dir or a component.\n\n :param component: Name of the component to get the prime directory for.\n\n :returns: The default prime directory or a component's prime directory.\n\n :raises SnapcraftError: If the component does not exist.\n \"\"\"\n try:\n return self.prime_dirs[component]\n except KeyError as err:\n raise errors.SnapcraftError(\n f\"Could not get prime directory for component {component!r} \"\n \"because it does not exist.\"\n ) from err\n\n @property\n def prime_dirs(self) -> dict[str | None, Path]:\n \"\"\"Return a mapping of component names to prime directories.\n\n 'None' maps to the default prime directory.\n \"\"\"\n return get_prime_dirs_from_project(self._lcm.project_info)\n\n def generate_manifest(self) -> models.Manifest:\n \"\"\"Create and populate the manifest file.\"\"\"\n primed_stage_packages: set[str] = set()\n\n image_information = os.getenv(\"SNAPCRAFT_IMAGE_INFO\", \"{}\")\n try:\n image_info = json.loads(image_information)\n except json.decoder.JSONDecodeError as err:\n raise errors.SnapcraftError(\n f\"Image information decode 
error at {err.lineno}:{err.colno}: \"\n f\"{err.doc!r}: {err.msg}\"\n ) from err\n\n project = cast(models.Project, self._project)\n\n project_parts = copy.deepcopy(project.parts)\n for name, part in project_parts.items():\n assets = self.get_pull_assets(part_name=name)\n if assets:\n part[\"stage-packages\"] = assets.get(\"stage-packages\", []) or []\n for key in (\"stage\", \"prime\", \"stage-packages\", \"build-packages\"):\n part.setdefault(key, [])\n\n stage_packages = self.get_primed_stage_packages(part_name=name)\n if stage_packages:\n primed_stage_packages |= set(stage_packages)\n\n osrel = os_release.OsRelease()\n version = utils.process_version(project.version)\n host_arch = utils.get_host_architecture()\n build_for = self._build_plan[0].build_for if self._build_plan else host_arch\n\n return models.Manifest(\n # Snapcraft annotations\n snapcraft_version=__version__,\n snapcraft_started_at=self._start_time.isoformat(\"T\") + \"Z\",\n snapcraft_os_release_id=osrel.name().lower(),\n snapcraft_os_release_version_id=osrel.version_id().lower(),\n # Project fields\n name=project.name,\n version=version,\n summary=str(project.summary),\n description=str(project.description),\n base=project.base,\n grade=project.grade or \"stable\",\n confinement=project.confinement,\n apps=project.apps,\n parts=project_parts,\n # Architecture\n architectures=[build_for],\n # Image info\n image_info=image_info,\n # Build environment\n build_packages=[],\n build_snaps=[],\n primed_stage_packages=sorted(primed_stage_packages),\n )\n\n @overrides\n def _get_local_keys_path(self) -> Path | None:\n snap_project = get_snap_project()\n keys_dir = snap_project.assets_dir / \"keys\"\n if keys_dir.is_dir():\n return keys_dir\n\n return None\n\n\ndef get_prime_dirs_from_project(project_info: ProjectInfo) -> dict[str | None, Path]:\n \"\"\"Get a mapping of component names to prime directories from a ProjectInfo.\n\n 'None' maps to the default prime directory.\n\n :param project_info: The ProjectInfo to get the prime directory mapping from.\n \"\"\"\n partition_prime_dirs = project_info.prime_dirs\n component_prime_dirs: dict[str | None, Path] = {None: project_info.prime_dir}\n\n # strip 'component/' prefix so that the component name is the key\n for partition, prime_dir in partition_prime_dirs.items():\n if partition and partition.startswith(\"component/\"):\n component = partition.split(\"/\", 1)[1]\n component_prime_dirs[component] = prime_dir\n\n return component_prime_dirs\n", "path": "snapcraft/services/lifecycle.py"}]}
| 2,313 | 225 |
gh_patches_debug_28837
|
rasdani/github-patches
|
git_diff
|
facebookresearch__nevergrad-202
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Optimizer creates candidate from out of bounds args
## Steps to reproduce
**Note:** I don't know if this is properly handled (doesn't cause the optimizer to mis-behave) but here is my observation:
1. Define a bounded instrumentation variable (ex. instru.var.Array(1).asfloat().bounded(0,5))
2. Create a candidate from out of space args (ex. 10)
3. Optimizer.tell(candidate, arbitrary value)
## Observed Results
the candidate is created normally and the optimizer accepts it.
## Expected Results
throwing an exception due to output of bounds args.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nevergrad/instrumentation/transforms.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import numpy as np
7 from scipy import stats
8
9
10 class Transform:
11 """Base class for transforms implementing a forward and a backward (inverse)
12 method.
13 This provide a default representation, and a short representation should be implemented
14 for each transform.
15 """
16
17 def forward(self, x: np.ndarray) -> np.ndarray:
18 raise NotImplementedError
19
20 def backward(self, y: np.ndarray) -> np.ndarray:
21 raise NotImplementedError
22
23 def reverted(self) -> 'Transform':
24 return Reverted(self)
25
26 def _short_repr(self) -> str:
27 raise NotImplementedError
28
29 def __repr__(self) -> str:
30 args = ", ".join(f"{x}={y}" for x, y in sorted(self.__dict__.items()) if not x.startswith("_"))
31 return f"{self.__class__.__name__}({args})"
32
33 def __format__(self, format_spec: str) -> str:
34 if format_spec == "short":
35 return self._short_repr()
36 return repr(self)
37
38
39 class Reverted(Transform):
40 """Inverse of a transform.
41
42 Parameters
43 ----------
44 transform: Transform
45 """
46
47 def __init__(self, transform: Transform) -> None:
48 self.transform = transform
49
50 def forward(self, x: np.ndarray) -> np.ndarray:
51 return self.transform.backward(x)
52
53 def backward(self, y: np.ndarray) -> np.ndarray:
54 return self.transform.forward(y)
55
56 def _short_repr(self) -> str:
57 return f'Rv({self.transform:short})'
58
59
60 class Affine(Transform):
61 """Affine transform a * x + b
62
63 Parameters
64 ----------
65 a: float
66 b: float
67 """
68
69 def __init__(self, a: float, b: float) -> None:
70 self.a = a
71 self.b = b
72
73 def forward(self, x: np.ndarray) -> np.ndarray:
74 return self.a * x + self.b # type: ignore
75
76 def backward(self, y: np.ndarray) -> np.ndarray:
77 return (y - self.b) / self.a # type: ignore
78
79 def _short_repr(self) -> str:
80 return f"Af({self.a},{self.b})"
81
82
83 class Exponentiate(Transform):
84 """Exponentiation transform base ** (coeff * x)
85 This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).
86
87 Parameters
88 ----------
89 base: float
90 coeff: float
91 """
92
93 def __init__(self, base: float = 10., coeff: float = 1.) -> None:
94 self.base = base
95 self.coeff = coeff
96
97 def forward(self, x: np.ndarray) -> np.ndarray:
98 return self.base ** (float(self.coeff) * x) # type: ignore
99
100 def backward(self, y: np.ndarray) -> np.ndarray:
101 return np.log(y) / (float(self.coeff) * np.log(self.base)) # type: ignore
102
103 def _short_repr(self) -> str:
104 return f"Ex({self.base},{self.coeff})"
105
106
107 class TanhBound(Transform):
108 """Bounds all real values into [min_val, max_val] using a tanh transform.
109 Beware, tanh goes very fast to its limits.
110
111 Parameters
112 ----------
113 min_val: float
114 max_val: float
115 """
116
117 def __init__(self, min_val: float, max_val: float) -> None:
118 assert min_val < max_val
119 self.min_val = min_val
120 self.max_val = max_val
121 self._b = .5 * (self.max_val + self.min_val)
122 self._a = .5 * (self.max_val - self.min_val)
123
124 def forward(self, x: np.ndarray) -> np.ndarray:
125 return self._b + self._a * np.tanh(x) # type: ignore
126
127 def backward(self, y: np.ndarray) -> np.ndarray:
128 return np.arctanh((y - self._b) / self._a) # type: ignore
129
130 def _short_repr(self) -> str:
131 return f"Th({self.min_val},{self.max_val})"
132
133
134 class ArctanBound(Transform):
135 """Bounds all real values into [min_val, max_val] using an arctan transform.
136 This is a much softer approach compared to tanh.
137
138 Parameters
139 ----------
140 min_val: float
141 max_val: float
142 """
143
144 def __init__(self, min_val: float, max_val: float) -> None:
145 assert min_val < max_val
146 self.min_val = min_val
147 self.max_val = max_val
148 self._b = .5 * (self.max_val + self.min_val)
149 self._a = (self.max_val - self.min_val) / np.pi
150
151 def forward(self, x: np.ndarray) -> np.ndarray:
152 return self._b + self._a * np.arctan(x) # type: ignore
153
154 def backward(self, y: np.ndarray) -> np.ndarray:
155 return np.tan((y - self._b) / self._a) # type: ignore
156
157 def _short_repr(self) -> str:
158 return f"At({self.min_val},{self.max_val})"
159
160
161 class CumulativeDensity(Transform):
162 """Bounds all real values into [0, 1] using a gaussian cumulative density function (cdf)
163 Beware, cdf goes very fast to its limits.
164 """
165
166 def forward(self, x: np.ndarray) -> np.ndarray:
167 return stats.norm.cdf(x) # type: ignore
168
169 def backward(self, y: np.ndarray) -> np.ndarray:
170 return stats.norm.ppf(y) # type: ignore
171
172 def _short_repr(self) -> str:
173 return f"Cd()"
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nevergrad/instrumentation/transforms.py b/nevergrad/instrumentation/transforms.py
--- a/nevergrad/instrumentation/transforms.py
+++ b/nevergrad/instrumentation/transforms.py
@@ -125,6 +125,9 @@
return self._b + self._a * np.tanh(x) # type: ignore
def backward(self, y: np.ndarray) -> np.ndarray:
+ if np.max(y) > self.max_val or np.min(y) < self.min_val:
+ raise ValueError(f"Only data between {self.min_val} and {self.max_val} "
+ "can be transformed back (bounds lead to infinity).")
return np.arctanh((y - self._b) / self._a) # type: ignore
def _short_repr(self) -> str:
@@ -152,6 +155,8 @@
return self._b + self._a * np.arctan(x) # type: ignore
def backward(self, y: np.ndarray) -> np.ndarray:
+ if np.max(y) > self.max_val or np.min(y) < self.min_val:
+ raise ValueError(f"Only data between {self.min_val} and {self.max_val} can be transformed back.")
return np.tan((y - self._b) / self._a) # type: ignore
def _short_repr(self) -> str:
@@ -167,6 +172,8 @@
return stats.norm.cdf(x) # type: ignore
def backward(self, y: np.ndarray) -> np.ndarray:
+ if np.max(y) > 1 or np.min(y) < 0:
+ raise ValueError("Only data between 0 and 1 can be transformed back (bounds lead to infinity).")
return stats.norm.ppf(y) # type: ignore
def _short_repr(self) -> str:
|
{"golden_diff": "diff --git a/nevergrad/instrumentation/transforms.py b/nevergrad/instrumentation/transforms.py\n--- a/nevergrad/instrumentation/transforms.py\n+++ b/nevergrad/instrumentation/transforms.py\n@@ -125,6 +125,9 @@\n return self._b + self._a * np.tanh(x) # type: ignore\n \n def backward(self, y: np.ndarray) -> np.ndarray:\n+ if np.max(y) > self.max_val or np.min(y) < self.min_val:\n+ raise ValueError(f\"Only data between {self.min_val} and {self.max_val} \"\n+ \"can be transformed back (bounds lead to infinity).\")\n return np.arctanh((y - self._b) / self._a) # type: ignore\n \n def _short_repr(self) -> str:\n@@ -152,6 +155,8 @@\n return self._b + self._a * np.arctan(x) # type: ignore\n \n def backward(self, y: np.ndarray) -> np.ndarray:\n+ if np.max(y) > self.max_val or np.min(y) < self.min_val:\n+ raise ValueError(f\"Only data between {self.min_val} and {self.max_val} can be transformed back.\")\n return np.tan((y - self._b) / self._a) # type: ignore\n \n def _short_repr(self) -> str:\n@@ -167,6 +172,8 @@\n return stats.norm.cdf(x) # type: ignore\n \n def backward(self, y: np.ndarray) -> np.ndarray:\n+ if np.max(y) > 1 or np.min(y) < 0:\n+ raise ValueError(\"Only data between 0 and 1 can be transformed back (bounds lead to infinity).\")\n return stats.norm.ppf(y) # type: ignore\n \n def _short_repr(self) -> str:\n", "issue": "Optimizer creates candidate from out of bounds args\n## Steps to reproduce\r\n\r\n**Note:** I don't know if this is properly handled (doesn't cause the optimizer to mis-behave) but here is my observation:\r\n 1. Define a bounded instrumentation variable (ex. instru.var.Array(1).asfloat().bounded(0,5))\r\n 2. Create a candidate from out of space args (ex. 10)\r\n 3. Optimizer.tell(candidate, arbitrary value)\r\n\r\n## Observed Results\r\n\r\nthe candidate is created normally and the optimizer accepts it.\r\n\r\n## Expected Results\r\n\r\nthrowing an exception due to output of bounds args.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nfrom scipy import stats\n\n\nclass Transform:\n \"\"\"Base class for transforms implementing a forward and a backward (inverse)\n method.\n This provide a default representation, and a short representation should be implemented\n for each transform.\n \"\"\"\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n raise NotImplementedError\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n raise NotImplementedError\n\n def reverted(self) -> 'Transform':\n return Reverted(self)\n\n def _short_repr(self) -> str:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n args = \", \".join(f\"{x}={y}\" for x, y in sorted(self.__dict__.items()) if not x.startswith(\"_\"))\n return f\"{self.__class__.__name__}({args})\"\n\n def __format__(self, format_spec: str) -> str:\n if format_spec == \"short\":\n return self._short_repr()\n return repr(self)\n\n\nclass Reverted(Transform):\n \"\"\"Inverse of a transform.\n\n Parameters\n ----------\n transform: Transform\n \"\"\"\n\n def __init__(self, transform: Transform) -> None:\n self.transform = transform\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.transform.backward(x)\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return self.transform.forward(y)\n\n def _short_repr(self) -> str:\n return f'Rv({self.transform:short})'\n\n\nclass Affine(Transform):\n \"\"\"Affine transform a * x + b\n\n Parameters\n ----------\n a: float\n b: float\n \"\"\"\n\n def __init__(self, a: float, b: float) -> None:\n self.a = a\n self.b = b\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.a * x + self.b # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return (y - self.b) / self.a # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Af({self.a},{self.b})\"\n\n\nclass Exponentiate(Transform):\n \"\"\"Exponentiation transform base ** (coeff * x)\n This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).\n\n Parameters\n ----------\n base: float\n coeff: float\n \"\"\"\n\n def __init__(self, base: float = 10., coeff: float = 1.) 
-> None:\n self.base = base\n self.coeff = coeff\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.base ** (float(self.coeff) * x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return np.log(y) / (float(self.coeff) * np.log(self.base)) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Ex({self.base},{self.coeff})\"\n\n\nclass TanhBound(Transform):\n \"\"\"Bounds all real values into [min_val, max_val] using a tanh transform.\n Beware, tanh goes very fast to its limits.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n \"\"\"\n\n def __init__(self, min_val: float, max_val: float) -> None:\n assert min_val < max_val\n self.min_val = min_val\n self.max_val = max_val\n self._b = .5 * (self.max_val + self.min_val)\n self._a = .5 * (self.max_val - self.min_val)\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self._b + self._a * np.tanh(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return np.arctanh((y - self._b) / self._a) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Th({self.min_val},{self.max_val})\"\n\n\nclass ArctanBound(Transform):\n \"\"\"Bounds all real values into [min_val, max_val] using an arctan transform.\n This is a much softer approach compared to tanh.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n \"\"\"\n\n def __init__(self, min_val: float, max_val: float) -> None:\n assert min_val < max_val\n self.min_val = min_val\n self.max_val = max_val\n self._b = .5 * (self.max_val + self.min_val)\n self._a = (self.max_val - self.min_val) / np.pi\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self._b + self._a * np.arctan(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return np.tan((y - self._b) / self._a) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"At({self.min_val},{self.max_val})\"\n\n\nclass CumulativeDensity(Transform):\n \"\"\"Bounds all real values into [0, 1] using a gaussian cumulative density function (cdf)\n Beware, cdf goes very fast to its limits.\n \"\"\"\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return stats.norm.cdf(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return stats.norm.ppf(y) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Cd()\"\n", "path": "nevergrad/instrumentation/transforms.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nfrom scipy import stats\n\n\nclass Transform:\n \"\"\"Base class for transforms implementing a forward and a backward (inverse)\n method.\n This provide a default representation, and a short representation should be implemented\n for each transform.\n \"\"\"\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n raise NotImplementedError\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n raise NotImplementedError\n\n def reverted(self) -> 'Transform':\n return Reverted(self)\n\n def _short_repr(self) -> str:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n args = \", \".join(f\"{x}={y}\" for x, y in sorted(self.__dict__.items()) if not x.startswith(\"_\"))\n return f\"{self.__class__.__name__}({args})\"\n\n def __format__(self, format_spec: str) -> str:\n if format_spec == \"short\":\n return self._short_repr()\n return repr(self)\n\n\nclass Reverted(Transform):\n \"\"\"Inverse of a transform.\n\n Parameters\n ----------\n transform: Transform\n \"\"\"\n\n def __init__(self, transform: Transform) -> None:\n self.transform = transform\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.transform.backward(x)\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return self.transform.forward(y)\n\n def _short_repr(self) -> str:\n return f'Rv({self.transform:short})'\n\n\nclass Affine(Transform):\n \"\"\"Affine transform a * x + b\n\n Parameters\n ----------\n a: float\n b: float\n \"\"\"\n\n def __init__(self, a: float, b: float) -> None:\n self.a = a\n self.b = b\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.a * x + self.b # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return (y - self.b) / self.a # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Af({self.a},{self.b})\"\n\n\nclass Exponentiate(Transform):\n \"\"\"Exponentiation transform base ** (coeff * x)\n This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).\n\n Parameters\n ----------\n base: float\n coeff: float\n \"\"\"\n\n def __init__(self, base: float = 10., coeff: float = 1.) 
-> None:\n self.base = base\n self.coeff = coeff\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self.base ** (float(self.coeff) * x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n return np.log(y) / (float(self.coeff) * np.log(self.base)) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Ex({self.base},{self.coeff})\"\n\n\nclass TanhBound(Transform):\n \"\"\"Bounds all real values into [min_val, max_val] using a tanh transform.\n Beware, tanh goes very fast to its limits.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n \"\"\"\n\n def __init__(self, min_val: float, max_val: float) -> None:\n assert min_val < max_val\n self.min_val = min_val\n self.max_val = max_val\n self._b = .5 * (self.max_val + self.min_val)\n self._a = .5 * (self.max_val - self.min_val)\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self._b + self._a * np.tanh(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n if np.max(y) > self.max_val or np.min(y) < self.min_val:\n raise ValueError(f\"Only data between {self.min_val} and {self.max_val} \"\n \"can be transformed back (bounds lead to infinity).\")\n return np.arctanh((y - self._b) / self._a) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Th({self.min_val},{self.max_val})\"\n\n\nclass ArctanBound(Transform):\n \"\"\"Bounds all real values into [min_val, max_val] using an arctan transform.\n This is a much softer approach compared to tanh.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n \"\"\"\n\n def __init__(self, min_val: float, max_val: float) -> None:\n assert min_val < max_val\n self.min_val = min_val\n self.max_val = max_val\n self._b = .5 * (self.max_val + self.min_val)\n self._a = (self.max_val - self.min_val) / np.pi\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return self._b + self._a * np.arctan(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n if np.max(y) > self.max_val or np.min(y) < self.min_val:\n raise ValueError(f\"Only data between {self.min_val} and {self.max_val} can be transformed back.\")\n return np.tan((y - self._b) / self._a) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"At({self.min_val},{self.max_val})\"\n\n\nclass CumulativeDensity(Transform):\n \"\"\"Bounds all real values into [0, 1] using a gaussian cumulative density function (cdf)\n Beware, cdf goes very fast to its limits.\n \"\"\"\n\n def forward(self, x: np.ndarray) -> np.ndarray:\n return stats.norm.cdf(x) # type: ignore\n\n def backward(self, y: np.ndarray) -> np.ndarray:\n if np.max(y) > 1 or np.min(y) < 0:\n raise ValueError(\"Only data between 0 and 1 can be transformed back (bounds lead to infinity).\")\n return stats.norm.ppf(y) # type: ignore\n\n def _short_repr(self) -> str:\n return f\"Cd()\"\n", "path": "nevergrad/instrumentation/transforms.py"}]}
| 2,145 | 425 |
gh_patches_debug_16334
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-741
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor setup.py external module selection for Cython
Reduce complexity and duplication of the external module selection for Cython in setup.py.
At the time of this issue, this cruft was located in:
https://github.com/kgriffs/falcon/blob/routing/setup.py#L35
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import glob
2 import imp
3 import io
4 import os
5 from os import path
6 from setuptools import setup, find_packages, Extension
7 import sys
8
9 MYDIR = path.abspath(os.path.dirname(__file__))
10
11 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
12 VERSION = VERSION.__version__
13
14 # NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3
15 # TODO(kgriffs): Fork and optimize/modernize python-mimeparse
16 REQUIRES = ['six>=1.4.0', 'python-mimeparse']
17
18 JYTHON = 'java' in sys.platform
19
20 try:
21 sys.pypy_version_info
22 PYPY = True
23 except AttributeError:
24 PYPY = False
25
26 if PYPY or JYTHON:
27 CYTHON = False
28 else:
29 try:
30 from Cython.Distutils import build_ext
31 CYTHON = True
32 except ImportError:
33 # TODO(kgriffs): pip now ignores all output, so the user
34 # may not see this message. See also:
35 #
36 # https://github.com/pypa/pip/issues/2732
37 #
38 print('\nNOTE: Cython not installed. '
39 'Falcon will still work fine, but may run '
40 'a bit slower.\n')
41 CYTHON = False
42
43 if CYTHON:
44 def list_modules(dirname):
45 filenames = glob.glob(path.join(dirname, '*.py'))
46
47 module_names = []
48 for name in filenames:
49 module, ext = path.splitext(path.basename(name))
50 if module != '__init__':
51 module_names.append(module)
52
53 return module_names
54
55 ext_modules = [
56 Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
57 for ext in list_modules(path.join(MYDIR, 'falcon'))]
58
59 ext_modules += [
60 Extension('falcon.util.' + ext,
61 [path.join('falcon', 'util', ext + '.py')])
62
63 for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
64
65 ext_modules += [
66 Extension('falcon.routing.' + ext,
67 [path.join('falcon', 'routing', ext + '.py')])
68
69 for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]
70
71 cmdclass = {'build_ext': build_ext}
72
73 else:
74 cmdclass = {}
75 ext_modules = []
76
77 setup(
78 name='falcon',
79 version=VERSION,
80 description='An unladen web framework for building APIs and app backends.',
81 long_description=io.open('README.rst', 'r', encoding='utf-8').read(),
82 classifiers=[
83 'Development Status :: 5 - Production/Stable',
84 'Environment :: Web Environment',
85 'Natural Language :: English',
86 'Intended Audience :: Developers',
87 'Intended Audience :: System Administrators',
88 'License :: OSI Approved :: Apache Software License',
89 'Operating System :: MacOS :: MacOS X',
90 'Operating System :: Microsoft :: Windows',
91 'Operating System :: POSIX',
92 'Topic :: Internet :: WWW/HTTP :: WSGI',
93 'Topic :: Software Development :: Libraries :: Application Frameworks',
94 'Programming Language :: Python',
95 'Programming Language :: Python :: Implementation :: CPython',
96 'Programming Language :: Python :: Implementation :: PyPy',
97 'Programming Language :: Python :: Implementation :: Jython',
98 'Programming Language :: Python :: 2.6',
99 'Programming Language :: Python :: 2.7',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Programming Language :: Python :: 3.5',
103 ],
104 keywords='wsgi web api framework rest http cloud',
105 author='Kurt Griffiths',
106 author_email='[email protected]',
107 url='http://falconframework.org',
108 license='Apache 2.0',
109 packages=find_packages(exclude=['tests']),
110 include_package_data=True,
111 zip_safe=False,
112 install_requires=REQUIRES,
113 setup_requires=[],
114 cmdclass=cmdclass,
115 ext_modules=ext_modules,
116 test_suite='nose.collector',
117 entry_points={
118 'console_scripts': [
119 'falcon-bench = falcon.cmd.bench:main'
120 ]
121 }
122 )
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,21 +52,15 @@
return module_names
+ package_names = ['falcon', 'falcon.util', 'falcon.routing']
ext_modules = [
- Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
- for ext in list_modules(path.join(MYDIR, 'falcon'))]
-
- ext_modules += [
- Extension('falcon.util.' + ext,
- [path.join('falcon', 'util', ext + '.py')])
-
- for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
-
- ext_modules += [
- Extension('falcon.routing.' + ext,
- [path.join('falcon', 'routing', ext + '.py')])
-
- for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]
+ Extension(
+ package + '.' + module,
+ [path.join(*(package.split('.') + [module + '.py']))]
+ )
+ for package in package_names
+ for module in list_modules(path.join(MYDIR, *package.split('.')))
+ ]
cmdclass = {'build_ext': build_ext}
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,21 +52,15 @@\n \n return module_names\n \n+ package_names = ['falcon', 'falcon.util', 'falcon.routing']\n ext_modules = [\n- Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n- for ext in list_modules(path.join(MYDIR, 'falcon'))]\n-\n- ext_modules += [\n- Extension('falcon.util.' + ext,\n- [path.join('falcon', 'util', ext + '.py')])\n-\n- for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n-\n- ext_modules += [\n- Extension('falcon.routing.' + ext,\n- [path.join('falcon', 'routing', ext + '.py')])\n-\n- for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]\n+ Extension(\n+ package + '.' + module,\n+ [path.join(*(package.split('.') + [module + '.py']))]\n+ )\n+ for package in package_names\n+ for module in list_modules(path.join(MYDIR, *package.split('.')))\n+ ]\n \n cmdclass = {'build_ext': build_ext}\n", "issue": "Refactor setup.py external module selection for Cython\nReduce complexity and duplication of the external module selection for Cython in setup.py.\n\nAt the time of this issue, this cruft was located in: \nhttps://github.com/kgriffs/falcon/blob/routing/setup.py#L35\n\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse']\n\nJYTHON = 'java' in sys.platform\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY or JYTHON:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n for ext in list_modules(path.join(MYDIR, 'falcon'))]\n\n ext_modules += [\n Extension('falcon.util.' + ext,\n [path.join('falcon', 'util', ext + '.py')])\n\n for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n\n ext_modules += [\n Extension('falcon.routing.' 
+ ext,\n [path.join('falcon', 'routing', ext + '.py')])\n\n for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse']\n\nJYTHON = 'java' in sys.platform\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY or JYTHON:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing']\n ext_modules = [\n Extension(\n package + '.' 
+ module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}]}
| 1,516 | 291 |
gh_patches_debug_7542
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-7835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`chainer.backend.copyto` cannot copy chainerx array to cupy
* Code to reproduce
```python
import chainer
import numpy
for dst_device in ['@numpy', '@cupy:0', '@intel64']:
for src_device in ['native', 'cuda:0']:
print((dst_device, src_device))
dst = chainer.get_device(dst_device).send(
numpy.array([1, 2], numpy.float32))
src = chainer.get_device(src_device).send(
numpy.array([3, 4], numpy.float32))
try:
chainer.backend.copyto(dst, src)
except Exception as e:
print(repr(e))
else:
print('ok')
```
* Error messages, stack traces, or logs
```
('@numpy', 'native')
ok
('@numpy', 'cuda:0')
ok
('@cupy:0', 'native')
TypeError('object array cannot be set to float32 array')
('@cupy:0', 'cuda:0')
TypeError('object array cannot be set to float32 array')
('@intel64', 'native')
ok
('@intel64', 'cuda:0')
ok
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/backend.py`
Content:
```
1 import numpy
2 import six
3
4 import chainer
5 from chainer.backends import _chainerx
6 from chainer.backends import _cpu
7 from chainer.backends import cuda
8 from chainer.backends import intel64
9 import chainerx
10
11 # Aliases
12 from chainer._backend import Device
13 from chainer.backends._chainerx import ChainerxDevice
14 from chainer.backends._chainerx import from_chx # NOQA
15 from chainer.backends._chainerx import to_chx # NOQA
16 from chainer.backends._cpu import CpuDevice
17 from chainer.backends.cuda import GpuDevice
18 from chainer.backends.intel64 import Intel64Device
19 from chainer import types # NOQA
20
21
22 def _contains_nan(x):
23 """Returns whether the input array has NaN values.
24
25 Args:
26 x (numpy.ndarray or cupy.ndarray): Array to be checked.
27
28 Returns:
29 bool: True if the input has NaN values.
30
31 """
32 if x.dtype.kind in ('f', 'c'):
33 device = get_device_from_array(x)
34 with chainer.using_device(device):
35 return device.xp.isnan(x).any()
36 else:
37 return False
38
39
40 def copyto(dst, src):
41 """Copies the elements of an ndarray to those of another one.
42
43 This function can copy the CPU/GPU arrays to the destination arrays on
44 another device.
45
46 Args:
47 dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
48 Destination array.
49 src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
50 Source array.
51
52 """
53 if isinstance(dst, chainerx.ndarray):
54 dst[...] = _chainerx._array_to_chainerx(src, dst.device)
55 elif isinstance(dst, numpy.ndarray):
56 numpy.copyto(dst, _cpu._to_cpu(src))
57 elif isinstance(dst, intel64.mdarray):
58 intel64.ideep.basic_copyto(
59 dst, _cpu._to_cpu(src))
60 elif isinstance(dst, cuda.ndarray):
61 if isinstance(src, chainer.get_cpu_array_types()):
62 src = numpy.asarray(src)
63 if dst.flags.c_contiguous or dst.flags.f_contiguous:
64 dst.set(src)
65 else:
66 cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
67 elif isinstance(src, cuda.ndarray):
68 cuda.cupy.copyto(dst, src)
69 else:
70 raise TypeError('cannot copy from non-array object of type {}'
71 .format(type(src)))
72 else:
73 raise TypeError('cannot copy to non-array object of type {}'.format(
74 type(dst)))
75
76
77 def _guess_device_from_array_module(xp):
78 """Returns a plausible device from array module
79
80 .. warning::
81
82 There can be multiple devices for a module
83
84 """
85 if xp is cuda.cupy:
86 return cuda.GpuDevice(cuda.Device())
87 elif xp is chainerx:
88 return _chainerx.ChainerxDevice(chainerx.get_default_device())
89 else:
90 # Cannot detect intel64, because xp of intel64 is numpy.
91 return _cpu.CpuDevice()
92
93
94 def get_device(device_spec):
95 # type: (types.DeviceSpec) -> Device
96 """Returns a device object.
97
98 Args:
99 device_spec (object): Device specifier.
100 If a :class:`chainer.backend.Device` instance is given, it is
101 returned intact. Otherwise the following values are supported:
102
103 * ChainerX devices
104
105 * A string representing a device.
106 (ex. ``'native:0'``, ``'native'``)
107 * A :class:`chainerx.Device` object.
108
109 * CuPy
110
111 * A string starts with ``'@cupy:'``.
112 (ex. ``'@cupy:0'``)
113 * A :class:`cupy.cuda.Device` object.
114
115 * NumPy
116
117 * The string ``'@numpy'``.
118
119 * NumPy with Intel Architecture
120
121 * The string ``'@intel64'``.
122 """
123 if isinstance(device_spec, Device):
124 return device_spec
125
126 if isinstance(device_spec, cuda._integer_types):
127 return _get_device_cupy_or_numpy(device_spec)
128
129 if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
130 return _chainerx.ChainerxDevice(device_spec)
131
132 if cuda.available and isinstance(device_spec, cuda.Device):
133 return cuda.GpuDevice(device_spec)
134
135 if isinstance(device_spec, six.string_types):
136 # '-1', '0', '1', ...
137 try:
138 int_device_spec = int(device_spec)
139 except ValueError:
140 pass
141 else:
142 return _get_device_cupy_or_numpy(int_device_spec)
143
144 if device_spec.startswith('@'):
145 # '@module:...'
146 mod_name, colon, precise_spec = device_spec[1:].partition(':')
147 if mod_name == 'numpy':
148 if not colon:
149 return _cpu.CpuDevice()
150 elif mod_name == 'cupy':
151 if colon:
152 return cuda.GpuDevice.from_device_id(int(precise_spec))
153 elif mod_name == 'intel64':
154 if not colon:
155 return intel64.Intel64Device()
156 raise ValueError(
157 'Device specifiers starting with \'@\' must be followed by'
158 ' a module name and depending on the module, module specific'
159 ' precise device specifiers. Actual: {}'.format(device_spec))
160 else:
161 # String device specifier without '@' prefix is assumed to be a
162 # ChainerX device.
163 if not chainerx.is_available():
164 raise RuntimeError(
165 'Tried to parse ChainerX device specifier \'{}\', '
166 'but ChainerX is not available. '
167 'Note that device specifiers without \'@\' prefix are '
168 'assumed to be ChainerX device '
169 'specifiers.'.format(device_spec))
170 return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))
171
172 raise TypeError(
173 'Device specifier must be a backend.Device, cuda.Device,'
174 ' chainerx.Device, integer or a string. Actual: {}'.format(
175 type(device_spec)))
176
177
178 def _get_device_cupy_or_numpy(device_spec):
179 # legacy spec of (gpu) device
180 if device_spec >= 0:
181 return cuda.GpuDevice.from_device_id(device_spec)
182 else:
183 return _cpu.CpuDevice()
184
185
186 def using_device(device_spec):
187 """Context manager to apply the thread-local device state.
188
189 Args:
190 device_spec (object): Device specifier. See :func:`chainer.get_device`
191 for details.
192
193 .. admonition:: Example
194
195 .. testcode::
196 :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)
197
198 with chainer.using_device('@cupy:1'):
199 a = cupy.empty((3, 2))
200
201 assert a.device.id == 1
202
203 """
204
205 # TODO(niboshi): Set default device (once this concept is introduced in
206 # Chainer).
207 device = get_device(device_spec)
208 return device.create_context()
209
210
211 def get_array_module(*args):
212 """Gets an appropriate NumPy-compatible module to process arguments
213
214 This function will return their data arrays' array module for
215 :class:`~chainer.Variable` arguments.
216
217 Args:
218 args: Values to determine whether NumPy, CuPy, or ChainerX should be
219 used.
220
221 Returns:
222 module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based
223 on the types of the arguments.
224
225 """
226 is_chainerx_available = chainerx.is_available()
227 if is_chainerx_available or cuda.available:
228 arrays = []
229 for arg in args:
230 # Unwrap arrays
231 if isinstance(arg, chainer.variable.Variable):
232 array = arg.data
233 else:
234 array = arg
235 if is_chainerx_available and isinstance(array, chainerx.ndarray):
236 return chainerx
237 arrays.append(array)
238 if cuda.available:
239 return cuda.cupy.get_array_module(*arrays)
240 return numpy
241
242
243 def get_device_from_array(*arrays):
244 """Gets the device from arrays.
245
246 The device on which the given array reside is returned.
247
248 .. note::
249
250 Unlike :func:`get_array_module`, this method does not recognize
251 :class:`~chainer.Variable` objects.
252 If you need to get device from the :class:`~chainer.Variable` instance
253 ``v``, you need to use ``get_device_from_array(v.array)``.
254
255 Args:
256 arrays (array or list of arrays):
257 Arrays to determine the device. If multiple arrays are given, the
258 device correspoinding to the first array which is not NumPy array
259 is returned.
260
261 Returns:
262 chainer.backend.Device: Device instance.
263 """
264 for array in arrays:
265 device = GpuDevice.from_array(array)
266 if device is not None:
267 return device
268
269 if isinstance(array, chainerx.ndarray):
270 return ChainerxDevice(array.device)
271
272 device = Intel64Device.from_array(array)
273 if device is not None:
274 return device
275
276 return CpuDevice()
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/backend.py b/chainer/backend.py
--- a/chainer/backend.py
+++ b/chainer/backend.py
@@ -52,7 +52,12 @@
"""
if isinstance(dst, chainerx.ndarray):
dst[...] = _chainerx._array_to_chainerx(src, dst.device)
- elif isinstance(dst, numpy.ndarray):
+ return
+
+ if isinstance(src, chainerx.ndarray):
+ src = from_chx(src)
+
+ if isinstance(dst, numpy.ndarray):
numpy.copyto(dst, _cpu._to_cpu(src))
elif isinstance(dst, intel64.mdarray):
intel64.ideep.basic_copyto(
|
{"golden_diff": "diff --git a/chainer/backend.py b/chainer/backend.py\n--- a/chainer/backend.py\n+++ b/chainer/backend.py\n@@ -52,7 +52,12 @@\n \"\"\"\n if isinstance(dst, chainerx.ndarray):\n dst[...] = _chainerx._array_to_chainerx(src, dst.device)\n- elif isinstance(dst, numpy.ndarray):\n+ return\n+\n+ if isinstance(src, chainerx.ndarray):\n+ src = from_chx(src)\n+\n+ if isinstance(dst, numpy.ndarray):\n numpy.copyto(dst, _cpu._to_cpu(src))\n elif isinstance(dst, intel64.mdarray):\n intel64.ideep.basic_copyto(\n", "issue": "`chainer.backend.copyto` cannot copy chainerx array to cupy\n* Code to reproduce\r\n```python\r\nimport chainer\r\nimport numpy\r\n\r\nfor dst_device in ['@numpy', '@cupy:0', '@intel64']:\r\n for src_device in ['native', 'cuda:0']:\r\n print((dst_device, src_device))\r\n dst = chainer.get_device(dst_device).send(\r\n numpy.array([1, 2], numpy.float32))\r\n src = chainer.get_device(src_device).send(\r\n numpy.array([3, 4], numpy.float32))\r\n try:\r\n chainer.backend.copyto(dst, src)\r\n except Exception as e:\r\n print(repr(e))\r\n else:\r\n print('ok')\r\n```\r\n\r\n* Error messages, stack traces, or logs\r\n```\r\n('@numpy', 'native')\r\nok\r\n('@numpy', 'cuda:0')\r\nok\r\n('@cupy:0', 'native')\r\nTypeError('object array cannot be set to float32 array')\r\n('@cupy:0', 'cuda:0')\r\nTypeError('object array cannot be set to float32 array')\r\n('@intel64', 'native')\r\nok\r\n('@intel64', 'cuda:0')\r\nok\r\n```\r\n\n", "before_files": [{"content": "import numpy\nimport six\n\nimport chainer\nfrom chainer.backends import _chainerx\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nimport chainerx\n\n# Aliases\nfrom chainer._backend import Device\nfrom chainer.backends._chainerx import ChainerxDevice\nfrom chainer.backends._chainerx import from_chx # NOQA\nfrom chainer.backends._chainerx import to_chx # NOQA\nfrom chainer.backends._cpu import CpuDevice\nfrom chainer.backends.cuda import GpuDevice\nfrom chainer.backends.intel64 import Intel64Device\nfrom chainer import types # NOQA\n\n\ndef _contains_nan(x):\n \"\"\"Returns whether the input array has NaN values.\n\n Args:\n x (numpy.ndarray or cupy.ndarray): Array to be checked.\n\n Returns:\n bool: True if the input has NaN values.\n\n \"\"\"\n if x.dtype.kind in ('f', 'c'):\n device = get_device_from_array(x)\n with chainer.using_device(device):\n return device.xp.isnan(x).any()\n else:\n return False\n\n\ndef copyto(dst, src):\n \"\"\"Copies the elements of an ndarray to those of another one.\n\n This function can copy the CPU/GPU arrays to the destination arrays on\n another device.\n\n Args:\n dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Destination array.\n src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Source array.\n\n \"\"\"\n if isinstance(dst, chainerx.ndarray):\n dst[...] 
= _chainerx._array_to_chainerx(src, dst.device)\n elif isinstance(dst, numpy.ndarray):\n numpy.copyto(dst, _cpu._to_cpu(src))\n elif isinstance(dst, intel64.mdarray):\n intel64.ideep.basic_copyto(\n dst, _cpu._to_cpu(src))\n elif isinstance(dst, cuda.ndarray):\n if isinstance(src, chainer.get_cpu_array_types()):\n src = numpy.asarray(src)\n if dst.flags.c_contiguous or dst.flags.f_contiguous:\n dst.set(src)\n else:\n cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))\n elif isinstance(src, cuda.ndarray):\n cuda.cupy.copyto(dst, src)\n else:\n raise TypeError('cannot copy from non-array object of type {}'\n .format(type(src)))\n else:\n raise TypeError('cannot copy to non-array object of type {}'.format(\n type(dst)))\n\n\ndef _guess_device_from_array_module(xp):\n \"\"\"Returns a plausible device from array module\n\n .. warning::\n\n There can be multiple devices for a module\n\n \"\"\"\n if xp is cuda.cupy:\n return cuda.GpuDevice(cuda.Device())\n elif xp is chainerx:\n return _chainerx.ChainerxDevice(chainerx.get_default_device())\n else:\n # Cannot detect intel64, because xp of intel64 is numpy.\n return _cpu.CpuDevice()\n\n\ndef get_device(device_spec):\n # type: (types.DeviceSpec) -> Device\n \"\"\"Returns a device object.\n\n Args:\n device_spec (object): Device specifier.\n If a :class:`chainer.backend.Device` instance is given, it is\n returned intact. Otherwise the following values are supported:\n\n * ChainerX devices\n\n * A string representing a device.\n (ex. ``'native:0'``, ``'native'``)\n * A :class:`chainerx.Device` object.\n\n * CuPy\n\n * A string starts with ``'@cupy:'``.\n (ex. ``'@cupy:0'``)\n * A :class:`cupy.cuda.Device` object.\n\n * NumPy\n\n * The string ``'@numpy'``.\n\n * NumPy with Intel Architecture\n\n * The string ``'@intel64'``.\n \"\"\"\n if isinstance(device_spec, Device):\n return device_spec\n\n if isinstance(device_spec, cuda._integer_types):\n return _get_device_cupy_or_numpy(device_spec)\n\n if chainerx.is_available() and isinstance(device_spec, chainerx.Device):\n return _chainerx.ChainerxDevice(device_spec)\n\n if cuda.available and isinstance(device_spec, cuda.Device):\n return cuda.GpuDevice(device_spec)\n\n if isinstance(device_spec, six.string_types):\n # '-1', '0', '1', ...\n try:\n int_device_spec = int(device_spec)\n except ValueError:\n pass\n else:\n return _get_device_cupy_or_numpy(int_device_spec)\n\n if device_spec.startswith('@'):\n # '@module:...'\n mod_name, colon, precise_spec = device_spec[1:].partition(':')\n if mod_name == 'numpy':\n if not colon:\n return _cpu.CpuDevice()\n elif mod_name == 'cupy':\n if colon:\n return cuda.GpuDevice.from_device_id(int(precise_spec))\n elif mod_name == 'intel64':\n if not colon:\n return intel64.Intel64Device()\n raise ValueError(\n 'Device specifiers starting with \\'@\\' must be followed by'\n ' a module name and depending on the module, module specific'\n ' precise device specifiers. Actual: {}'.format(device_spec))\n else:\n # String device specifier without '@' prefix is assumed to be a\n # ChainerX device.\n if not chainerx.is_available():\n raise RuntimeError(\n 'Tried to parse ChainerX device specifier \\'{}\\', '\n 'but ChainerX is not available. '\n 'Note that device specifiers without \\'@\\' prefix are '\n 'assumed to be ChainerX device '\n 'specifiers.'.format(device_spec))\n return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))\n\n raise TypeError(\n 'Device specifier must be a backend.Device, cuda.Device,'\n ' chainerx.Device, integer or a string. 
Actual: {}'.format(\n type(device_spec)))\n\n\ndef _get_device_cupy_or_numpy(device_spec):\n # legacy spec of (gpu) device\n if device_spec >= 0:\n return cuda.GpuDevice.from_device_id(device_spec)\n else:\n return _cpu.CpuDevice()\n\n\ndef using_device(device_spec):\n \"\"\"Context manager to apply the thread-local device state.\n\n Args:\n device_spec (object): Device specifier. See :func:`chainer.get_device`\n for details.\n\n .. admonition:: Example\n\n .. testcode::\n :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)\n\n with chainer.using_device('@cupy:1'):\n a = cupy.empty((3, 2))\n\n assert a.device.id == 1\n\n \"\"\"\n\n # TODO(niboshi): Set default device (once this concept is introduced in\n # Chainer).\n device = get_device(device_spec)\n return device.create_context()\n\n\ndef get_array_module(*args):\n \"\"\"Gets an appropriate NumPy-compatible module to process arguments\n\n This function will return their data arrays' array module for\n :class:`~chainer.Variable` arguments.\n\n Args:\n args: Values to determine whether NumPy, CuPy, or ChainerX should be\n used.\n\n Returns:\n module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based\n on the types of the arguments.\n\n \"\"\"\n is_chainerx_available = chainerx.is_available()\n if is_chainerx_available or cuda.available:\n arrays = []\n for arg in args:\n # Unwrap arrays\n if isinstance(arg, chainer.variable.Variable):\n array = arg.data\n else:\n array = arg\n if is_chainerx_available and isinstance(array, chainerx.ndarray):\n return chainerx\n arrays.append(array)\n if cuda.available:\n return cuda.cupy.get_array_module(*arrays)\n return numpy\n\n\ndef get_device_from_array(*arrays):\n \"\"\"Gets the device from arrays.\n\n The device on which the given array reside is returned.\n\n .. note::\n\n Unlike :func:`get_array_module`, this method does not recognize\n :class:`~chainer.Variable` objects.\n If you need to get device from the :class:`~chainer.Variable` instance\n ``v``, you need to use ``get_device_from_array(v.array)``.\n\n Args:\n arrays (array or list of arrays):\n Arrays to determine the device. 
If multiple arrays are given, the\n device correspoinding to the first array which is not NumPy array\n is returned.\n\n Returns:\n chainer.backend.Device: Device instance.\n \"\"\"\n for array in arrays:\n device = GpuDevice.from_array(array)\n if device is not None:\n return device\n\n if isinstance(array, chainerx.ndarray):\n return ChainerxDevice(array.device)\n\n device = Intel64Device.from_array(array)\n if device is not None:\n return device\n\n return CpuDevice()\n", "path": "chainer/backend.py"}], "after_files": [{"content": "import numpy\nimport six\n\nimport chainer\nfrom chainer.backends import _chainerx\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nimport chainerx\n\n# Aliases\nfrom chainer._backend import Device\nfrom chainer.backends._chainerx import ChainerxDevice\nfrom chainer.backends._chainerx import from_chx # NOQA\nfrom chainer.backends._chainerx import to_chx # NOQA\nfrom chainer.backends._cpu import CpuDevice\nfrom chainer.backends.cuda import GpuDevice\nfrom chainer.backends.intel64 import Intel64Device\nfrom chainer import types # NOQA\n\n\ndef _contains_nan(x):\n \"\"\"Returns whether the input array has NaN values.\n\n Args:\n x (numpy.ndarray or cupy.ndarray): Array to be checked.\n\n Returns:\n bool: True if the input has NaN values.\n\n \"\"\"\n if x.dtype.kind in ('f', 'c'):\n device = get_device_from_array(x)\n with chainer.using_device(device):\n return device.xp.isnan(x).any()\n else:\n return False\n\n\ndef copyto(dst, src):\n \"\"\"Copies the elements of an ndarray to those of another one.\n\n This function can copy the CPU/GPU arrays to the destination arrays on\n another device.\n\n Args:\n dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Destination array.\n src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Source array.\n\n \"\"\"\n if isinstance(dst, chainerx.ndarray):\n dst[...] = _chainerx._array_to_chainerx(src, dst.device)\n return\n\n if isinstance(src, chainerx.ndarray):\n src = from_chx(src)\n\n if isinstance(dst, numpy.ndarray):\n numpy.copyto(dst, _cpu._to_cpu(src))\n elif isinstance(dst, intel64.mdarray):\n intel64.ideep.basic_copyto(\n dst, _cpu._to_cpu(src))\n elif isinstance(dst, cuda.ndarray):\n if isinstance(src, chainer.get_cpu_array_types()):\n src = numpy.asarray(src)\n if dst.flags.c_contiguous or dst.flags.f_contiguous:\n dst.set(src)\n else:\n cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))\n elif isinstance(src, cuda.ndarray):\n cuda.cupy.copyto(dst, src)\n else:\n raise TypeError('cannot copy from non-array object of type {}'\n .format(type(src)))\n else:\n raise TypeError('cannot copy to non-array object of type {}'.format(\n type(dst)))\n\n\ndef _guess_device_from_array_module(xp):\n \"\"\"Returns a plausible device from array module\n\n .. warning::\n\n There can be multiple devices for a module\n\n \"\"\"\n if xp is cuda.cupy:\n return cuda.GpuDevice(cuda.Device())\n elif xp is chainerx:\n return _chainerx.ChainerxDevice(chainerx.get_default_device())\n else:\n # Cannot detect intel64, because xp of intel64 is numpy.\n return _cpu.CpuDevice()\n\n\ndef get_device(device_spec):\n # type: (types.DeviceSpec) -> Device\n \"\"\"Returns a device object.\n\n Args:\n device_spec (object): Device specifier.\n If a :class:`chainer.backend.Device` instance is given, it is\n returned intact. Otherwise the following values are supported:\n\n * ChainerX devices\n\n * A string representing a device.\n (ex. 
``'native:0'``, ``'native'``)\n * A :class:`chainerx.Device` object.\n\n * CuPy\n\n * A string starts with ``'@cupy:'``.\n (ex. ``'@cupy:0'``)\n * A :class:`cupy.cuda.Device` object.\n\n * NumPy\n\n * The string ``'@numpy'``.\n\n * NumPy with Intel Architecture\n\n * The string ``'@intel64'``.\n \"\"\"\n if isinstance(device_spec, Device):\n return device_spec\n\n if isinstance(device_spec, cuda._integer_types):\n return _get_device_cupy_or_numpy(device_spec)\n\n if chainerx.is_available() and isinstance(device_spec, chainerx.Device):\n return _chainerx.ChainerxDevice(device_spec)\n\n if cuda.available and isinstance(device_spec, cuda.Device):\n return cuda.GpuDevice(device_spec)\n\n if isinstance(device_spec, six.string_types):\n # '-1', '0', '1', ...\n try:\n int_device_spec = int(device_spec)\n except ValueError:\n pass\n else:\n return _get_device_cupy_or_numpy(int_device_spec)\n\n if device_spec.startswith('@'):\n # '@module:...'\n mod_name, colon, precise_spec = device_spec[1:].partition(':')\n if mod_name == 'numpy':\n if not colon:\n return _cpu.CpuDevice()\n elif mod_name == 'cupy':\n if colon:\n return cuda.GpuDevice.from_device_id(int(precise_spec))\n elif mod_name == 'intel64':\n if not colon:\n return intel64.Intel64Device()\n raise ValueError(\n 'Device specifiers starting with \\'@\\' must be followed by'\n ' a module name and depending on the module, module specific'\n ' precise device specifiers. Actual: {}'.format(device_spec))\n else:\n # String device specifier without '@' prefix is assumed to be a\n # ChainerX device.\n if not chainerx.is_available():\n raise RuntimeError(\n 'Tried to parse ChainerX device specifier \\'{}\\', '\n 'but ChainerX is not available. '\n 'Note that device specifiers without \\'@\\' prefix are '\n 'assumed to be ChainerX device '\n 'specifiers.'.format(device_spec))\n return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))\n\n raise TypeError(\n 'Device specifier must be a backend.Device, cuda.Device,'\n ' chainerx.Device, integer or a string. Actual: {}'.format(\n type(device_spec)))\n\n\ndef _get_device_cupy_or_numpy(device_spec):\n # legacy spec of (gpu) device\n if device_spec >= 0:\n return cuda.GpuDevice.from_device_id(device_spec)\n else:\n return _cpu.CpuDevice()\n\n\ndef using_device(device_spec):\n \"\"\"Context manager to apply the thread-local device state.\n\n Args:\n device_spec (object): Device specifier. See :func:`chainer.get_device`\n for details.\n\n .. admonition:: Example\n\n .. 
testcode::\n :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)\n\n with chainer.using_device('@cupy:1'):\n a = cupy.empty((3, 2))\n\n assert a.device.id == 1\n\n \"\"\"\n\n # TODO(niboshi): Set default device (once this concept is introduced in\n # Chainer).\n device = get_device(device_spec)\n return device.create_context()\n\n\ndef get_array_module(*args):\n \"\"\"Gets an appropriate NumPy-compatible module to process arguments\n\n This function will return their data arrays' array module for\n :class:`~chainer.Variable` arguments.\n\n Args:\n args: Values to determine whether NumPy, CuPy, or ChainerX should be\n used.\n\n Returns:\n module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based\n on the types of the arguments.\n\n \"\"\"\n is_chainerx_available = chainerx.is_available()\n if is_chainerx_available or cuda.available:\n arrays = []\n for arg in args:\n # Unwrap arrays\n if isinstance(arg, chainer.variable.Variable):\n array = arg.data\n else:\n array = arg\n if is_chainerx_available and isinstance(array, chainerx.ndarray):\n return chainerx\n arrays.append(array)\n if cuda.available:\n return cuda.cupy.get_array_module(*arrays)\n return numpy\n\n\ndef get_device_from_array(*arrays):\n \"\"\"Gets the device from arrays.\n\n The device on which the given array reside is returned.\n\n .. note::\n\n Unlike :func:`get_array_module`, this method does not recognize\n :class:`~chainer.Variable` objects.\n If you need to get device from the :class:`~chainer.Variable` instance\n ``v``, you need to use ``get_device_from_array(v.array)``.\n\n Args:\n arrays (array or list of arrays):\n Arrays to determine the device. If multiple arrays are given, the\n device correspoinding to the first array which is not NumPy array\n is returned.\n\n Returns:\n chainer.backend.Device: Device instance.\n \"\"\"\n for array in arrays:\n device = GpuDevice.from_array(array)\n if device is not None:\n return device\n\n if isinstance(array, chainerx.ndarray):\n return ChainerxDevice(array.device)\n\n device = Intel64Device.from_array(array)\n if device is not None:\n return device\n\n return CpuDevice()\n", "path": "chainer/backend.py"}]}
| 3,266 | 151 |
gh_patches_debug_2614
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-3318
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
edit-command --run should clear the status bar
Thanks to @rcorre for implementing `:edit-command` from #2453. Quick issue: when using the `--run` flag, not only should the command be executed, but the status bar should also be cleared (or whatever `<esc>` tends to do). Here's what happens currently (v1.0.3, abb5c9f63):
- bind `<ctrl-e>` to `edit-command --run`
- do `:open<ctrl-e>`
- type `www.qutebrowser.org`
- save & quit
What happens:
1. get sent to the Qutebrowser home page
2. status bar still says `:open www.qutebrowser.org`, and I also see URLs from my history
What I expected to happen: 1 but not 2.
This means I need to hit `<esc>` after doing an `:edit-command --run`.
--- END ISSUE ---
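For reference, the key binding from the reproduction steps corresponds to a single line in qutebrowser's Python `config.py` (this is an illustrative equivalent of the repro setup; the exact key is incidental):

```python
# Bind Ctrl-E in command mode to open the external editor and run the result.
config.bind('<Ctrl-E>', 'edit-command --run', mode='command')
```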
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/mainwindow/statusbar/command.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """The commandline in the statusbar."""
21
22 from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize
23 from PyQt5.QtWidgets import QSizePolicy
24
25 from qutebrowser.keyinput import modeman, modeparsers
26 from qutebrowser.commands import cmdexc, cmdutils
27 from qutebrowser.misc import cmdhistory, editor
28 from qutebrowser.misc import miscwidgets as misc
29 from qutebrowser.utils import usertypes, log, objreg
30
31
32 class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):
33
34 """The commandline part of the statusbar.
35
36 Attributes:
37 _win_id: The window ID this widget is associated with.
38
39 Signals:
40 got_cmd: Emitted when a command is triggered by the user.
41 arg: The command string and also potentially the count.
42 clear_completion_selection: Emitted before the completion widget is
43 hidden.
44 hide_completion: Emitted when the completion widget should be hidden.
45 update_completion: Emitted when the completion should be shown/updated.
46 show_cmd: Emitted when command input should be shown.
47 hide_cmd: Emitted when command input can be hidden.
48 """
49
50 got_cmd = pyqtSignal([str], [str, int])
51 clear_completion_selection = pyqtSignal()
52 hide_completion = pyqtSignal()
53 update_completion = pyqtSignal()
54 show_cmd = pyqtSignal()
55 hide_cmd = pyqtSignal()
56
57 def __init__(self, *, win_id, private, parent=None):
58 misc.CommandLineEdit.__init__(self, parent=parent)
59 misc.MinimalLineEditMixin.__init__(self)
60 self._win_id = win_id
61 if not private:
62 command_history = objreg.get('command-history')
63 self.history.history = command_history.data
64 self.history.changed.connect(command_history.changed)
65 self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)
66 self.cursorPositionChanged.connect(self.update_completion)
67 self.textChanged.connect(self.update_completion)
68 self.textChanged.connect(self.updateGeometry)
69
70 def prefix(self):
71 """Get the currently entered command prefix."""
72 text = self.text()
73 if not text:
74 return ''
75 elif text[0] in modeparsers.STARTCHARS:
76 return text[0]
77 else:
78 return ''
79
80 def set_cmd_text(self, text):
81 """Preset the statusbar to some text.
82
83 Args:
84 text: The text to set as string.
85 """
86 self.setText(text)
87 log.modes.debug("Setting command text, focusing {!r}".format(self))
88 modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')
89 self.setFocus()
90 self.show_cmd.emit()
91
92 @cmdutils.register(instance='status-command', name='set-cmd-text',
93 scope='window', maxsplit=0)
94 @cmdutils.argument('count', count=True)
95 def set_cmd_text_command(self, text, count=None, space=False, append=False,
96 run_on_count=False):
97 """Preset the statusbar to some text.
98
99 //
100
101 Wrapper for set_cmd_text to check the arguments and allow multiple
102 strings which will get joined.
103
104 Args:
105 text: The commandline to set.
106 count: The count if given.
107 space: If given, a space is added to the end.
108 append: If given, the text is appended to the current text.
109 run_on_count: If given with a count, the command is run with the
110 given count rather than setting the command text.
111 """
112 if space:
113 text += ' '
114 if append:
115 if not self.text():
116 raise cmdexc.CommandError("No current text!")
117 text = self.text() + text
118
119 if not text or text[0] not in modeparsers.STARTCHARS:
120 raise cmdexc.CommandError(
121 "Invalid command text '{}'.".format(text))
122 if run_on_count and count is not None:
123 self.got_cmd[str, int].emit(text, count)
124 else:
125 self.set_cmd_text(text)
126
127 @cmdutils.register(instance='status-command',
128 modes=[usertypes.KeyMode.command], scope='window')
129 def command_history_prev(self):
130 """Go back in the commandline history."""
131 try:
132 if not self.history.is_browsing():
133 item = self.history.start(self.text().strip())
134 else:
135 item = self.history.previtem()
136 except (cmdhistory.HistoryEmptyError,
137 cmdhistory.HistoryEndReachedError):
138 return
139 if item:
140 self.set_cmd_text(item)
141
142 @cmdutils.register(instance='status-command',
143 modes=[usertypes.KeyMode.command], scope='window')
144 def command_history_next(self):
145 """Go forward in the commandline history."""
146 if not self.history.is_browsing():
147 return
148 try:
149 item = self.history.nextitem()
150 except cmdhistory.HistoryEndReachedError:
151 return
152 if item:
153 self.set_cmd_text(item)
154
155 @cmdutils.register(instance='status-command',
156 modes=[usertypes.KeyMode.command], scope='window')
157 def command_accept(self):
158 """Execute the command currently in the commandline."""
159 prefixes = {
160 ':': '',
161 '/': 'search -- ',
162 '?': 'search -r -- ',
163 }
164 text = self.text()
165 self.history.append(text)
166 modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')
167 self.got_cmd[str].emit(prefixes[text[0]] + text[1:])
168
169 @cmdutils.register(instance='status-command', scope='window')
170 def edit_command(self, run=False):
171 """Open an editor to modify the current command.
172
173 Args:
174 run: Run the command if the editor exits successfully.
175 """
176 ed = editor.ExternalEditor(parent=self)
177
178 def callback(text):
179 self.set_cmd_text(text)
180 if run:
181 self.got_cmd[str].emit(text)
182
183 ed.editing_finished.connect(callback)
184 ed.edit(self.text())
185
186 @pyqtSlot(usertypes.KeyMode)
187 def on_mode_left(self, mode):
188 """Clear up when command mode was left.
189
190 - Clear the statusbar text if it's explicitly unfocused.
191 - Clear completion selection
192 - Hide completion
193
194 Args:
195 mode: The mode which was left.
196 """
197 if mode == usertypes.KeyMode.command:
198 self.setText('')
199 self.history.stop()
200 self.hide_cmd.emit()
201 self.clear_completion_selection.emit()
202 self.hide_completion.emit()
203
204 def setText(self, text):
205 """Extend setText to set prefix and make sure the prompt is ok."""
206 if not text:
207 pass
208 elif text[0] in modeparsers.STARTCHARS:
209 super().set_prompt(text[0])
210 else:
211 raise AssertionError("setText got called with invalid text "
212 "'{}'!".format(text))
213 super().setText(text)
214
215 def keyPressEvent(self, e):
216 """Override keyPressEvent to ignore Return key presses.
217
218 If this widget is focused, we are in passthrough key mode, and
219 Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished
220 without command_accept to be called.
221 """
222 if e.key() == Qt.Key_Return:
223 e.ignore()
224 return
225 else:
226 super().keyPressEvent(e)
227
228 def sizeHint(self):
229 """Dynamically calculate the needed size."""
230 height = super().sizeHint().height()
231 text = self.text()
232 if not text:
233 text = 'x'
234 width = self.fontMetrics().width(text)
235 return QSize(width, height)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py
--- a/qutebrowser/mainwindow/statusbar/command.py
+++ b/qutebrowser/mainwindow/statusbar/command.py
@@ -178,7 +178,7 @@
def callback(text):
self.set_cmd_text(text)
if run:
- self.got_cmd[str].emit(text)
+ self.command_accept()
ed.editing_finished.connect(callback)
ed.edit(self.text())
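
The patch routes the editor callback through `command_accept()`, which (per the file listing above) calls `modeman.leave(...)` before emitting `got_cmd`; leaving command mode triggers `on_mode_left`, which is what clears the status bar text. A rough, self-contained sketch of that behavioural difference, using stand-ins instead of real qutebrowser objects (all names below are assumptions for illustration only):

```python
from unittest import mock

class FakeStatusCommand:
    """Simplified stand-in for the statusbar Command widget."""

    def __init__(self):
        self.got_cmd = mock.Mock()       # signal stand-in
        self.left_command_mode = False   # leaving command mode clears the bar
        self._text = ""

    def set_cmd_text(self, text):
        self._text = text

    def command_accept(self):
        # the real method also appends to history; the key part is that it
        # leaves command mode and then emits the command
        self.left_command_mode = True
        self.got_cmd.emit(self._text[1:])

    def edit_command_callback(self, text, run):
        # mirrors the patched callback inside edit_command()
        self.set_cmd_text(text)
        if run:
            self.command_accept()

cmd = FakeStatusCommand()
cmd.edit_command_callback(":open www.qutebrowser.org", run=True)
assert cmd.got_cmd.emit.called   # the command still runs
assert cmd.left_command_mode     # ...and command mode is left, clearing the bar
```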
|
{"golden_diff": "diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py\n--- a/qutebrowser/mainwindow/statusbar/command.py\n+++ b/qutebrowser/mainwindow/statusbar/command.py\n@@ -178,7 +178,7 @@\n def callback(text):\n self.set_cmd_text(text)\n if run:\n- self.got_cmd[str].emit(text)\n+ self.command_accept()\n \n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n", "issue": "edit-command --run should clear the status bar\nThanks to @rcorre for implementing `:edit-command` from #2453. Quick issue: when using the `--run` flag, not only should the command be executed, but the status bar should also be cleared (or whatever `<esc>` tends to do). Here's what happens currently (v1.0.3, abb5c9f63):\r\n\r\n- bind `<ctrl-e>` to `edit-command --run`\r\n- do `:open<ctrl-e>`\r\n- type `www.qutebrowser.org`\r\n- save & quit\r\n\r\nWhat happens:\r\n\r\n1. get sent to the Qutebrowser home page\r\n2. status bar still says `:open www.qutebrowser.org`, and I also see URLs from my history\r\n\r\nWhat I expected to happen: 1 but not 2.\r\n\r\nThis means I need to hit `<esc>` after doing an `:edit-command --run`.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The commandline in the statusbar.\"\"\"\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom qutebrowser.keyinput import modeman, modeparsers\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.misc import cmdhistory, editor\nfrom qutebrowser.misc import miscwidgets as misc\nfrom qutebrowser.utils import usertypes, log, objreg\n\n\nclass Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n\n \"\"\"The commandline part of the statusbar.\n\n Attributes:\n _win_id: The window ID this widget is associated with.\n\n Signals:\n got_cmd: Emitted when a command is triggered by the user.\n arg: The command string and also potentially the count.\n clear_completion_selection: Emitted before the completion widget is\n hidden.\n hide_completion: Emitted when the completion widget should be hidden.\n update_completion: Emitted when the completion should be shown/updated.\n show_cmd: Emitted when command input should be shown.\n hide_cmd: Emitted when command input can be hidden.\n \"\"\"\n\n got_cmd = pyqtSignal([str], [str, int])\n clear_completion_selection = pyqtSignal()\n hide_completion = pyqtSignal()\n update_completion = pyqtSignal()\n show_cmd = pyqtSignal()\n hide_cmd = pyqtSignal()\n\n def __init__(self, *, win_id, private, parent=None):\n misc.CommandLineEdit.__init__(self, parent=parent)\n misc.MinimalLineEditMixin.__init__(self)\n self._win_id = win_id\n if not private:\n command_history = objreg.get('command-history')\n self.history.history = command_history.data\n self.history.changed.connect(command_history.changed)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)\n self.cursorPositionChanged.connect(self.update_completion)\n self.textChanged.connect(self.update_completion)\n self.textChanged.connect(self.updateGeometry)\n\n def prefix(self):\n \"\"\"Get the currently entered command prefix.\"\"\"\n text = self.text()\n if not text:\n return ''\n elif text[0] in modeparsers.STARTCHARS:\n return text[0]\n else:\n return ''\n\n def set_cmd_text(self, text):\n \"\"\"Preset the statusbar to some text.\n\n Args:\n text: The text to set as string.\n \"\"\"\n self.setText(text)\n log.modes.debug(\"Setting command text, focusing {!r}\".format(self))\n modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')\n self.setFocus()\n self.show_cmd.emit()\n\n @cmdutils.register(instance='status-command', name='set-cmd-text',\n scope='window', maxsplit=0)\n @cmdutils.argument('count', count=True)\n def set_cmd_text_command(self, text, count=None, space=False, append=False,\n run_on_count=False):\n \"\"\"Preset the statusbar to some text.\n\n //\n\n Wrapper for set_cmd_text to check the arguments and allow multiple\n strings which will get joined.\n\n Args:\n text: The commandline to set.\n count: The count if given.\n space: If given, a space is added to the end.\n append: If given, the text is appended to the current text.\n run_on_count: If given with a count, the command is run with the\n given count rather than setting the command text.\n \"\"\"\n if space:\n text += ' '\n if append:\n if not self.text():\n raise cmdexc.CommandError(\"No current text!\")\n text = self.text() + text\n\n if not text or text[0] not in modeparsers.STARTCHARS:\n raise cmdexc.CommandError(\n \"Invalid command text '{}'.\".format(text))\n if run_on_count and count is not None:\n self.got_cmd[str, int].emit(text, count)\n else:\n self.set_cmd_text(text)\n\n 
@cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_prev(self):\n \"\"\"Go back in the commandline history.\"\"\"\n try:\n if not self.history.is_browsing():\n item = self.history.start(self.text().strip())\n else:\n item = self.history.previtem()\n except (cmdhistory.HistoryEmptyError,\n cmdhistory.HistoryEndReachedError):\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_next(self):\n \"\"\"Go forward in the commandline history.\"\"\"\n if not self.history.is_browsing():\n return\n try:\n item = self.history.nextitem()\n except cmdhistory.HistoryEndReachedError:\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_accept(self):\n \"\"\"Execute the command currently in the commandline.\"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n text = self.text()\n self.history.append(text)\n modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n\n @cmdutils.register(instance='status-command', scope='window')\n def edit_command(self, run=False):\n \"\"\"Open an editor to modify the current command.\n\n Args:\n run: Run the command if the editor exits successfully.\n \"\"\"\n ed = editor.ExternalEditor(parent=self)\n\n def callback(text):\n self.set_cmd_text(text)\n if run:\n self.got_cmd[str].emit(text)\n\n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Clear up when command mode was left.\n\n - Clear the statusbar text if it's explicitly unfocused.\n - Clear completion selection\n - Hide completion\n\n Args:\n mode: The mode which was left.\n \"\"\"\n if mode == usertypes.KeyMode.command:\n self.setText('')\n self.history.stop()\n self.hide_cmd.emit()\n self.clear_completion_selection.emit()\n self.hide_completion.emit()\n\n def setText(self, text):\n \"\"\"Extend setText to set prefix and make sure the prompt is ok.\"\"\"\n if not text:\n pass\n elif text[0] in modeparsers.STARTCHARS:\n super().set_prompt(text[0])\n else:\n raise AssertionError(\"setText got called with invalid text \"\n \"'{}'!\".format(text))\n super().setText(text)\n\n def keyPressEvent(self, e):\n \"\"\"Override keyPressEvent to ignore Return key presses.\n\n If this widget is focused, we are in passthrough key mode, and\n Enter/Shift+Enter/etc. 
will cause QLineEdit to think it's finished\n without command_accept to be called.\n \"\"\"\n if e.key() == Qt.Key_Return:\n e.ignore()\n return\n else:\n super().keyPressEvent(e)\n\n def sizeHint(self):\n \"\"\"Dynamically calculate the needed size.\"\"\"\n height = super().sizeHint().height()\n text = self.text()\n if not text:\n text = 'x'\n width = self.fontMetrics().width(text)\n return QSize(width, height)\n", "path": "qutebrowser/mainwindow/statusbar/command.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The commandline in the statusbar.\"\"\"\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom qutebrowser.keyinput import modeman, modeparsers\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.misc import cmdhistory, editor\nfrom qutebrowser.misc import miscwidgets as misc\nfrom qutebrowser.utils import usertypes, log, objreg\n\n\nclass Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n\n \"\"\"The commandline part of the statusbar.\n\n Attributes:\n _win_id: The window ID this widget is associated with.\n\n Signals:\n got_cmd: Emitted when a command is triggered by the user.\n arg: The command string and also potentially the count.\n clear_completion_selection: Emitted before the completion widget is\n hidden.\n hide_completion: Emitted when the completion widget should be hidden.\n update_completion: Emitted when the completion should be shown/updated.\n show_cmd: Emitted when command input should be shown.\n hide_cmd: Emitted when command input can be hidden.\n \"\"\"\n\n got_cmd = pyqtSignal([str], [str, int])\n clear_completion_selection = pyqtSignal()\n hide_completion = pyqtSignal()\n update_completion = pyqtSignal()\n show_cmd = pyqtSignal()\n hide_cmd = pyqtSignal()\n\n def __init__(self, *, win_id, private, parent=None):\n misc.CommandLineEdit.__init__(self, parent=parent)\n misc.MinimalLineEditMixin.__init__(self)\n self._win_id = win_id\n if not private:\n command_history = objreg.get('command-history')\n self.history.history = command_history.data\n self.history.changed.connect(command_history.changed)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)\n self.cursorPositionChanged.connect(self.update_completion)\n self.textChanged.connect(self.update_completion)\n self.textChanged.connect(self.updateGeometry)\n\n def prefix(self):\n \"\"\"Get the currently entered command prefix.\"\"\"\n text = self.text()\n if not text:\n return ''\n elif text[0] in modeparsers.STARTCHARS:\n return text[0]\n else:\n return ''\n\n def set_cmd_text(self, text):\n \"\"\"Preset the statusbar to some text.\n\n Args:\n text: The text to set as string.\n \"\"\"\n self.setText(text)\n 
log.modes.debug(\"Setting command text, focusing {!r}\".format(self))\n modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')\n self.setFocus()\n self.show_cmd.emit()\n\n @cmdutils.register(instance='status-command', name='set-cmd-text',\n scope='window', maxsplit=0)\n @cmdutils.argument('count', count=True)\n def set_cmd_text_command(self, text, count=None, space=False, append=False,\n run_on_count=False):\n \"\"\"Preset the statusbar to some text.\n\n //\n\n Wrapper for set_cmd_text to check the arguments and allow multiple\n strings which will get joined.\n\n Args:\n text: The commandline to set.\n count: The count if given.\n space: If given, a space is added to the end.\n append: If given, the text is appended to the current text.\n run_on_count: If given with a count, the command is run with the\n given count rather than setting the command text.\n \"\"\"\n if space:\n text += ' '\n if append:\n if not self.text():\n raise cmdexc.CommandError(\"No current text!\")\n text = self.text() + text\n\n if not text or text[0] not in modeparsers.STARTCHARS:\n raise cmdexc.CommandError(\n \"Invalid command text '{}'.\".format(text))\n if run_on_count and count is not None:\n self.got_cmd[str, int].emit(text, count)\n else:\n self.set_cmd_text(text)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_prev(self):\n \"\"\"Go back in the commandline history.\"\"\"\n try:\n if not self.history.is_browsing():\n item = self.history.start(self.text().strip())\n else:\n item = self.history.previtem()\n except (cmdhistory.HistoryEmptyError,\n cmdhistory.HistoryEndReachedError):\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_next(self):\n \"\"\"Go forward in the commandline history.\"\"\"\n if not self.history.is_browsing():\n return\n try:\n item = self.history.nextitem()\n except cmdhistory.HistoryEndReachedError:\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_accept(self):\n \"\"\"Execute the command currently in the commandline.\"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n text = self.text()\n self.history.append(text)\n modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n\n @cmdutils.register(instance='status-command', scope='window')\n def edit_command(self, run=False):\n \"\"\"Open an editor to modify the current command.\n\n Args:\n run: Run the command if the editor exits successfully.\n \"\"\"\n ed = editor.ExternalEditor(parent=self)\n\n def callback(text):\n self.set_cmd_text(text)\n if run:\n self.command_accept()\n\n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Clear up when command mode was left.\n\n - Clear the statusbar text if it's explicitly unfocused.\n - Clear completion selection\n - Hide completion\n\n Args:\n mode: The mode which was left.\n \"\"\"\n if mode == usertypes.KeyMode.command:\n self.setText('')\n self.history.stop()\n self.hide_cmd.emit()\n self.clear_completion_selection.emit()\n self.hide_completion.emit()\n\n def setText(self, text):\n \"\"\"Extend setText to set prefix and make sure the prompt is ok.\"\"\"\n if not text:\n pass\n elif text[0] in 
modeparsers.STARTCHARS:\n super().set_prompt(text[0])\n else:\n raise AssertionError(\"setText got called with invalid text \"\n \"'{}'!\".format(text))\n super().setText(text)\n\n def keyPressEvent(self, e):\n \"\"\"Override keyPressEvent to ignore Return key presses.\n\n If this widget is focused, we are in passthrough key mode, and\n Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished\n without command_accept to be called.\n \"\"\"\n if e.key() == Qt.Key_Return:\n e.ignore()\n return\n else:\n super().keyPressEvent(e)\n\n def sizeHint(self):\n \"\"\"Dynamically calculate the needed size.\"\"\"\n height = super().sizeHint().height()\n text = self.text()\n if not text:\n text = 'x'\n width = self.fontMetrics().width(text)\n return QSize(width, height)\n", "path": "qutebrowser/mainwindow/statusbar/command.py"}]}
| 2,894 | 106 |
gh_patches_debug_39901
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-380
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Server Error (500) when trying to add expertise to a Profile
https://vas3k.club/profile/expertise/add/
This happens when you try to add an Expertise with the same name as one that was already added. I wanted to test adding the same one again but with a different expertise level, and got a Server Error (500).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/data/expertise.py`
Content:
```
1 EXPERTISE = [
2 ("Хард-скиллы", [
3 ("frontend", "Фронтенд"),
4 ("backend", "Бекенд"),
5 ("mobile", "Мобильная разработка"),
6 ("machine-learning", "Машинное Обучение"),
7 ("data", "Данные и аналитика"),
8 ("infra", "Инфраструктура"),
9 ("crypto", "Крипта"),
10 ("qa", "QA"),
11 ("devops", "DevOps"),
12 ("hardware", "Хардварь"),
13 ("imaging", "Компьютерное зрение"),
14 ("nlp", "NLP"),
15 ("iot", "IoT"),
16 ("ux", "UX/UI"),
17 ("pm", "Продакт-менеджмент"),
18 ("security", "Безопасность"),
19 ("marketing", "Маркетинг"),
20 ("video", "Видео-продакшен"),
21 ("audio", "Аудио"),
22 ("copywriting", "Копирайтинг"),
23 ("design", "Дизайн"),
24 ("science", "Наука"),
25 ("business", "Бизнес"),
26 ]),
27 ("Софт-скиллы", [
28 ("hire", "Найм людей"),
29 ("lead", "Управление командами"),
30 ("critical", "Критическое мышление"),
31 ("rationality", "Рациональность"),
32 ("conflicts", "Решение конфликтов"),
33 ("coaching", "Менторинг"),
34 ("public-speaking", "Публичные выступления"),
35 ("planning", "Планирование"),
36 ("ethics", "Этика"),
37 ]),
38 ("Языки", [
39 ("python", "Python"),
40 ("java", "Java"),
41 ("javascript", "JavaScript"),
42 ("go", "Go"),
43 ("php", "PHP"),
44 ("ruby", "Ruby"),
45 ("swift", "Swift"),
46 ("cplus", "C/C++"),
47 ("csharp", "C#"),
48 ])
49 ]
50
```
Path: `users/forms/profile.py`
Content:
```
1 from django import forms
2 from django.core.exceptions import ValidationError
3 from django.forms import ModelForm
4
5 from common.data.countries import COUNTRIES
6 from common.data.expertise import EXPERTISE
7 from users.models.user import User
8 from users.models.expertise import UserExpertise
9 from common.forms import ImageUploadField
10
11
12 class ProfileEditForm(ModelForm):
13 full_name = forms.CharField(
14 label="Имя и фамилия",
15 required=True,
16 max_length=128
17 )
18 avatar = ImageUploadField(
19 label="Аватар",
20 required=False,
21 resize=(512, 512),
22 convert_to="jpg",
23 )
24 city = forms.CharField(
25 label="город",
26 required=True,
27 max_length=120
28 )
29 country = forms.ChoiceField(
30 label="Страна",
31 choices=COUNTRIES,
32 required=True
33 )
34 bio = forms.CharField(
35 label="Ссылочки на себя и всякое такое",
36 required=False,
37 max_length=1024,
38 widget=forms.Textarea(attrs={"maxlength": 1024}),
39 )
40 company = forms.CharField(
41 label="Компания",
42 required=True,
43 max_length=128
44 )
45 position = forms.CharField(
46 label="Должность или что вы делаете",
47 required=True,
48 max_length=128
49 )
50 contact = forms.CharField(
51 label="Контакт для связи",
52 required=True,
53 max_length=256,
54 )
55
56 class Meta:
57 model = User
58 fields = [
59 "full_name",
60 "avatar",
61 "company",
62 "position",
63 "city",
64 "country",
65 "bio",
66 "contact",
67 ]
68
69
70 class NotificationsEditForm(ModelForm):
71 email_digest_type = forms.ChoiceField(
72 label="Тип email-дайджеста",
73 required=True,
74 choices=User.EMAIL_DIGEST_TYPES,
75 initial=User.EMAIL_DIGEST_TYPE_WEEKLY,
76 widget=forms.RadioSelect(),
77 )
78
79 class Meta:
80 model = User
81 fields = [
82 "email_digest_type",
83 ]
84
85
86 class ExpertiseForm(ModelForm):
87 expertise = forms.ChoiceField(
88 label="Область",
89 required=True,
90 choices=EXPERTISE + [("custom", "[добавить своё]")],
91 )
92 expertise_custom = forms.CharField(
93 label="Свой вариант",
94 required=False,
95 max_length=32
96 )
97 value = forms.IntegerField(
98 label="Скилл",
99 min_value=0,
100 max_value=100,
101 required=True,
102 widget=forms.NumberInput(attrs={"type": "range", "step": "1"}),
103 )
104
105 class Meta:
106 model = UserExpertise
107 fields = ["expertise", "value"]
108
109 def clean(self):
110 super().clean()
111 custom_expertise = self.cleaned_data.get("expertise_custom")
112 if custom_expertise:
113 self.cleaned_data["expertise"] = custom_expertise
114
115 if not self.cleaned_data["expertise"]:
116 raise ValidationError("Name is required")
117
```
Path: `users/models/expertise.py`
Content:
```
1 from uuid import uuid4
2
3 from django.db import models
4 from slugify import slugify
5
6 from common.data.colors import COOL_COLORS
7 from common.data.expertise import EXPERTISE
8 from users.models.user import User
9
10
11 class UserExpertise(models.Model):
12 id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
13 user = models.ForeignKey(User, related_name="expertise", on_delete=models.CASCADE)
14 expertise = models.CharField(max_length=32, null=False, db_index=True)
15 name = models.CharField(max_length=64, null=False)
16 value = models.IntegerField(default=0, null=False)
17 created_at = models.DateTimeField(auto_now_add=True)
18
19 class Meta:
20 db_table = "user_expertise"
21 unique_together = [["expertise", "user"]]
22 ordering = ["created_at"]
23
24 def save(self, *args, **kwargs):
25 pre_defined_expertise = dict(sum([e[1] for e in EXPERTISE], [])) # flatten nested items
26
27 if not self.name:
28 self.name = pre_defined_expertise.get(self.expertise) or self.expertise
29
30 if self.expertise not in pre_defined_expertise:
31 self.expertise = slugify(self.expertise.lower())[:32]
32
33 return super().save(*args, **kwargs)
34
35 @property
36 def color(self):
37 return COOL_COLORS[hash(self.name) % len(COOL_COLORS)]
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/common/data/expertise.py b/common/data/expertise.py
--- a/common/data/expertise.py
+++ b/common/data/expertise.py
@@ -1,3 +1,5 @@
+from itertools import chain
+
EXPERTISE = [
("Хард-скиллы", [
("frontend", "Фронтенд"),
@@ -47,3 +49,6 @@
("csharp", "C#"),
])
]
+
+
+EXPERTISE_FLAT_MAP = dict(chain.from_iterable(items for _, items in EXPERTISE))
diff --git a/users/forms/profile.py b/users/forms/profile.py
--- a/users/forms/profile.py
+++ b/users/forms/profile.py
@@ -110,7 +110,15 @@
super().clean()
custom_expertise = self.cleaned_data.get("expertise_custom")
if custom_expertise:
- self.cleaned_data["expertise"] = custom_expertise
+ self.cleaned_data["expertise"] = UserExpertise.make_custom_expertise_slug(custom_expertise)
if not self.cleaned_data["expertise"]:
raise ValidationError("Name is required")
+
+ def save(self, commit=True):
+ instance = super().save(commit=commit)
+ custom_expertise = self.cleaned_data.get("expertise_custom")
+ if custom_expertise:
+ instance.name = custom_expertise
+ return instance
+
diff --git a/users/models/expertise.py b/users/models/expertise.py
--- a/users/models/expertise.py
+++ b/users/models/expertise.py
@@ -4,14 +4,16 @@
from slugify import slugify
from common.data.colors import COOL_COLORS
-from common.data.expertise import EXPERTISE
+from common.data.expertise import EXPERTISE_FLAT_MAP
from users.models.user import User
class UserExpertise(models.Model):
+ EXPERTISE_SLUG_LENGTH = 32
+
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
user = models.ForeignKey(User, related_name="expertise", on_delete=models.CASCADE)
- expertise = models.CharField(max_length=32, null=False, db_index=True)
+ expertise = models.CharField(max_length=EXPERTISE_SLUG_LENGTH, null=False, db_index=True)
name = models.CharField(max_length=64, null=False)
value = models.IntegerField(default=0, null=False)
created_at = models.DateTimeField(auto_now_add=True)
@@ -22,16 +24,19 @@
ordering = ["created_at"]
def save(self, *args, **kwargs):
- pre_defined_expertise = dict(sum([e[1] for e in EXPERTISE], [])) # flatten nested items
if not self.name:
- self.name = pre_defined_expertise.get(self.expertise) or self.expertise
+ self.name = EXPERTISE_FLAT_MAP.get(self.expertise) or self.expertise
- if self.expertise not in pre_defined_expertise:
- self.expertise = slugify(self.expertise.lower())[:32]
+ if self.expertise not in EXPERTISE_FLAT_MAP:
+ self.expertise = self.make_custom_expertise_slug(self.expertise)
return super().save(*args, **kwargs)
@property
def color(self):
return COOL_COLORS[hash(self.name) % len(COOL_COLORS)]
+
+ @classmethod
+ def make_custom_expertise_slug(cls, expertise_name: str):
+ return slugify(expertise_name.lower())[:cls.EXPERTISE_SLUG_LENGTH]
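
The refactor above centralises slug handling in two shared helpers — `EXPERTISE_FLAT_MAP` and `UserExpertise.make_custom_expertise_slug()` — so the form and the model derive the same `expertise` value, while the new `ExpertiseForm.save()` keeps the human-readable name for custom entries. A standalone sketch of just those helpers (the trimmed `EXPERTISE` subset and example values are made up; `slugify` is the python-slugify package the project already imports):

```python
from itertools import chain
from slugify import slugify

# trimmed, illustrative subset of common/data/expertise.py
EXPERTISE = [
    ("Хард-скиллы", [("backend", "Бекенд"), ("frontend", "Фронтенд")]),
    ("Языки", [("python", "Python")]),
]
EXPERTISE_FLAT_MAP = dict(chain.from_iterable(items for _, items in EXPERTISE))

EXPERTISE_SLUG_LENGTH = 32

def make_custom_expertise_slug(expertise_name: str) -> str:
    # same truncation the model applies to custom entries before saving
    return slugify(expertise_name.lower())[:EXPERTISE_SLUG_LENGTH]

assert EXPERTISE_FLAT_MAP["backend"] == "Бекенд"
assert make_custom_expertise_slug("Machine Learning") == "machine-learning"
```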
|
{"golden_diff": "diff --git a/common/data/expertise.py b/common/data/expertise.py\n--- a/common/data/expertise.py\n+++ b/common/data/expertise.py\n@@ -1,3 +1,5 @@\n+from itertools import chain\n+\n EXPERTISE = [\n (\"\u0425\u0430\u0440\u0434-\u0441\u043a\u0438\u043b\u043b\u044b\", [\n (\"frontend\", \"\u0424\u0440\u043e\u043d\u0442\u0435\u043d\u0434\"),\n@@ -47,3 +49,6 @@\n (\"csharp\", \"C#\"),\n ])\n ]\n+\n+\n+EXPERTISE_FLAT_MAP = dict(chain.from_iterable(items for _, items in EXPERTISE))\ndiff --git a/users/forms/profile.py b/users/forms/profile.py\n--- a/users/forms/profile.py\n+++ b/users/forms/profile.py\n@@ -110,7 +110,15 @@\n super().clean()\n custom_expertise = self.cleaned_data.get(\"expertise_custom\")\n if custom_expertise:\n- self.cleaned_data[\"expertise\"] = custom_expertise\n+ self.cleaned_data[\"expertise\"] = UserExpertise.make_custom_expertise_slug(custom_expertise)\n \n if not self.cleaned_data[\"expertise\"]:\n raise ValidationError(\"Name is required\")\n+\n+ def save(self, commit=True):\n+ instance = super().save(commit=commit)\n+ custom_expertise = self.cleaned_data.get(\"expertise_custom\")\n+ if custom_expertise:\n+ instance.name = custom_expertise\n+ return instance\n+\ndiff --git a/users/models/expertise.py b/users/models/expertise.py\n--- a/users/models/expertise.py\n+++ b/users/models/expertise.py\n@@ -4,14 +4,16 @@\n from slugify import slugify\n \n from common.data.colors import COOL_COLORS\n-from common.data.expertise import EXPERTISE\n+from common.data.expertise import EXPERTISE_FLAT_MAP\n from users.models.user import User\n \n \n class UserExpertise(models.Model):\n+ EXPERTISE_SLUG_LENGTH = 32\n+\n id = models.UUIDField(primary_key=True, default=uuid4, editable=False)\n user = models.ForeignKey(User, related_name=\"expertise\", on_delete=models.CASCADE)\n- expertise = models.CharField(max_length=32, null=False, db_index=True)\n+ expertise = models.CharField(max_length=EXPERTISE_SLUG_LENGTH, null=False, db_index=True)\n name = models.CharField(max_length=64, null=False)\n value = models.IntegerField(default=0, null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n@@ -22,16 +24,19 @@\n ordering = [\"created_at\"]\n \n def save(self, *args, **kwargs):\n- pre_defined_expertise = dict(sum([e[1] for e in EXPERTISE], [])) # flatten nested items\n \n if not self.name:\n- self.name = pre_defined_expertise.get(self.expertise) or self.expertise\n+ self.name = EXPERTISE_FLAT_MAP.get(self.expertise) or self.expertise\n \n- if self.expertise not in pre_defined_expertise:\n- self.expertise = slugify(self.expertise.lower())[:32]\n+ if self.expertise not in EXPERTISE_FLAT_MAP:\n+ self.expertise = self.make_custom_expertise_slug(self.expertise)\n \n return super().save(*args, **kwargs)\n \n @property\n def color(self):\n return COOL_COLORS[hash(self.name) % len(COOL_COLORS)]\n+\n+ @classmethod\n+ def make_custom_expertise_slug(cls, expertise_name: str):\n+ return slugify(expertise_name.lower())[:cls.EXPERTISE_SLUG_LENGTH]\n", "issue": "\u0411\u0430\u0433: \u041f\u0440\u0438 \u043f\u043e\u043f\u044b\u0442\u043a\u0435 \u0434\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u044f \u044d\u043a\u0441\u043f\u0435\u0440\u0442\u0438\u0437\u044b \u0432 \u041f\u0440\u043e\u0444\u0438\u043b\u044c \u043e\u0448\u0438\u0431\u043a\u0430 Server Error (500)\nhttps://vas3k.club/profile/expertise/add/\r\n\r\n\u042d\u0442\u043e \u0435\u0441\u043b\u0438 \u043f\u043e\u043f\u044b\u0442\u0430\u0442\u044c\u0441\u044f \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c 
\u042d\u043a\u0441\u043f\u0435\u0440\u0442\u0438\u0437\u0443 \u0441 \u0442\u0430\u043a\u0438\u043c \u0436\u0435 \u0438\u043c\u0435\u043d\u0435\u043c \u043a\u0430\u043a\u0438\u043c \u0443\u0436\u0435 \u0431\u044b\u043b\u0430 \u0434\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u043d\u0430, \u0445\u043e\u0442\u0435\u043b \u0434\u043b\u044f \u0442\u0435\u0441\u0442\u0430 \u043f\u043e\u043f\u0440\u043e\u0431\u043e\u0432\u0430\u0442\u044c \u0442\u0430\u043a\u0443\u044e \u0436\u0435 \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c, \u043d\u043e \u0441 \u0434\u0440\u0443\u0433\u0438\u043c \u0443\u0440\u043e\u0432\u043d\u0435\u043c \u044d\u043a\u0441\u043f\u0435\u0440\u0442\u0438\u0437\u044b. \u041f\u043e\u043b\u0443\u0447\u0438\u043b \u043e\u0448\u0438\u0431\u043a\u0443 Server Error (500).\n", "before_files": [{"content": "EXPERTISE = [\n (\"\u0425\u0430\u0440\u0434-\u0441\u043a\u0438\u043b\u043b\u044b\", [\n (\"frontend\", \"\u0424\u0440\u043e\u043d\u0442\u0435\u043d\u0434\"),\n (\"backend\", \"\u0411\u0435\u043a\u0435\u043d\u0434\"),\n (\"mobile\", \"\u041c\u043e\u0431\u0438\u043b\u044c\u043d\u0430\u044f \u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430\"),\n (\"machine-learning\", \"\u041c\u0430\u0448\u0438\u043d\u043d\u043e\u0435 \u041e\u0431\u0443\u0447\u0435\u043d\u0438\u0435\"),\n (\"data\", \"\u0414\u0430\u043d\u043d\u044b\u0435 \u0438 \u0430\u043d\u0430\u043b\u0438\u0442\u0438\u043a\u0430\"),\n (\"infra\", \"\u0418\u043d\u0444\u0440\u0430\u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\"),\n (\"crypto\", \"\u041a\u0440\u0438\u043f\u0442\u0430\"),\n (\"qa\", \"QA\"),\n (\"devops\", \"DevOps\"),\n (\"hardware\", \"\u0425\u0430\u0440\u0434\u0432\u0430\u0440\u044c\"),\n (\"imaging\", \"\u041a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440\u043d\u043e\u0435 \u0437\u0440\u0435\u043d\u0438\u0435\"),\n (\"nlp\", \"NLP\"),\n (\"iot\", \"IoT\"),\n (\"ux\", \"UX/UI\"),\n (\"pm\", \"\u041f\u0440\u043e\u0434\u0430\u043a\u0442-\u043c\u0435\u043d\u0435\u0434\u0436\u043c\u0435\u043d\u0442\"),\n (\"security\", \"\u0411\u0435\u0437\u043e\u043f\u0430\u0441\u043d\u043e\u0441\u0442\u044c\"),\n (\"marketing\", \"\u041c\u0430\u0440\u043a\u0435\u0442\u0438\u043d\u0433\"),\n (\"video\", \"\u0412\u0438\u0434\u0435\u043e-\u043f\u0440\u043e\u0434\u0430\u043a\u0448\u0435\u043d\"),\n (\"audio\", \"\u0410\u0443\u0434\u0438\u043e\"),\n (\"copywriting\", \"\u041a\u043e\u043f\u0438\u0440\u0430\u0439\u0442\u0438\u043d\u0433\"),\n (\"design\", \"\u0414\u0438\u0437\u0430\u0439\u043d\"),\n (\"science\", \"\u041d\u0430\u0443\u043a\u0430\"),\n (\"business\", \"\u0411\u0438\u0437\u043d\u0435\u0441\"),\n ]),\n (\"\u0421\u043e\u0444\u0442-\u0441\u043a\u0438\u043b\u043b\u044b\", [\n (\"hire\", \"\u041d\u0430\u0439\u043c \u043b\u044e\u0434\u0435\u0439\"),\n (\"lead\", \"\u0423\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043a\u043e\u043c\u0430\u043d\u0434\u0430\u043c\u0438\"),\n (\"critical\", \"\u041a\u0440\u0438\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u043c\u044b\u0448\u043b\u0435\u043d\u0438\u0435\"),\n (\"rationality\", \"\u0420\u0430\u0446\u0438\u043e\u043d\u0430\u043b\u044c\u043d\u043e\u0441\u0442\u044c\"),\n (\"conflicts\", \"\u0420\u0435\u0448\u0435\u043d\u0438\u0435 \u043a\u043e\u043d\u0444\u043b\u0438\u043a\u0442\u043e\u0432\"),\n (\"coaching\", \"\u041c\u0435\u043d\u0442\u043e\u0440\u0438\u043d\u0433\"),\n (\"public-speaking\", \"\u041f\u0443\u0431\u043b\u0438\u0447\u043d\u044b\u0435 \u0432\u044b\u0441\u0442\u0443\u043f\u043b\u0435\u043d\u0438\u044f\"),\n (\"planning\", 
\"\u041f\u043b\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435\"),\n (\"ethics\", \"\u042d\u0442\u0438\u043a\u0430\"),\n ]),\n (\"\u042f\u0437\u044b\u043a\u0438\", [\n (\"python\", \"Python\"),\n (\"java\", \"Java\"),\n (\"javascript\", \"JavaScript\"),\n (\"go\", \"Go\"),\n (\"php\", \"PHP\"),\n (\"ruby\", \"Ruby\"),\n (\"swift\", \"Swift\"),\n (\"cplus\", \"C/C++\"),\n (\"csharp\", \"C#\"),\n ])\n]\n", "path": "common/data/expertise.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\n\nfrom common.data.countries import COUNTRIES\nfrom common.data.expertise import EXPERTISE\nfrom users.models.user import User\nfrom users.models.expertise import UserExpertise\nfrom common.forms import ImageUploadField\n\n\nclass ProfileEditForm(ModelForm):\n full_name = forms.CharField(\n label=\"\u0418\u043c\u044f \u0438 \u0444\u0430\u043c\u0438\u043b\u0438\u044f\",\n required=True,\n max_length=128\n )\n avatar = ImageUploadField(\n label=\"\u0410\u0432\u0430\u0442\u0430\u0440\",\n required=False,\n resize=(512, 512),\n convert_to=\"jpg\",\n )\n city = forms.CharField(\n label=\"\u0433\u043e\u0440\u043e\u0434\",\n required=True,\n max_length=120\n )\n country = forms.ChoiceField(\n label=\"\u0421\u0442\u0440\u0430\u043d\u0430\",\n choices=COUNTRIES,\n required=True\n )\n bio = forms.CharField(\n label=\"\u0421\u0441\u044b\u043b\u043e\u0447\u043a\u0438 \u043d\u0430 \u0441\u0435\u0431\u044f \u0438 \u0432\u0441\u044f\u043a\u043e\u0435 \u0442\u0430\u043a\u043e\u0435\",\n required=False,\n max_length=1024,\n widget=forms.Textarea(attrs={\"maxlength\": 1024}),\n )\n company = forms.CharField(\n label=\"\u041a\u043e\u043c\u043f\u0430\u043d\u0438\u044f\",\n required=True,\n max_length=128\n )\n position = forms.CharField(\n label=\"\u0414\u043e\u043b\u0436\u043d\u043e\u0441\u0442\u044c \u0438\u043b\u0438 \u0447\u0442\u043e \u0432\u044b \u0434\u0435\u043b\u0430\u0435\u0442\u0435\",\n required=True,\n max_length=128\n )\n contact = forms.CharField(\n label=\"\u041a\u043e\u043d\u0442\u0430\u043a\u0442 \u0434\u043b\u044f \u0441\u0432\u044f\u0437\u0438\",\n required=True,\n max_length=256,\n )\n\n class Meta:\n model = User\n fields = [\n \"full_name\",\n \"avatar\",\n \"company\",\n \"position\",\n \"city\",\n \"country\",\n \"bio\",\n \"contact\",\n ]\n\n\nclass NotificationsEditForm(ModelForm):\n email_digest_type = forms.ChoiceField(\n label=\"\u0422\u0438\u043f email-\u0434\u0430\u0439\u0434\u0436\u0435\u0441\u0442\u0430\",\n required=True,\n choices=User.EMAIL_DIGEST_TYPES,\n initial=User.EMAIL_DIGEST_TYPE_WEEKLY,\n widget=forms.RadioSelect(),\n )\n\n class Meta:\n model = User\n fields = [\n \"email_digest_type\",\n ]\n\n\nclass ExpertiseForm(ModelForm):\n expertise = forms.ChoiceField(\n label=\"\u041e\u0431\u043b\u0430\u0441\u0442\u044c\",\n required=True,\n choices=EXPERTISE + [(\"custom\", \"[\u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0441\u0432\u043e\u0451]\")],\n )\n expertise_custom = forms.CharField(\n label=\"\u0421\u0432\u043e\u0439 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\",\n required=False,\n max_length=32\n )\n value = forms.IntegerField(\n label=\"\u0421\u043a\u0438\u043b\u043b\",\n min_value=0,\n max_value=100,\n required=True,\n widget=forms.NumberInput(attrs={\"type\": \"range\", \"step\": \"1\"}),\n )\n\n class Meta:\n model = UserExpertise\n fields = [\"expertise\", \"value\"]\n\n def clean(self):\n super().clean()\n custom_expertise = self.cleaned_data.get(\"expertise_custom\")\n if 
custom_expertise:\n self.cleaned_data[\"expertise\"] = custom_expertise\n\n if not self.cleaned_data[\"expertise\"]:\n raise ValidationError(\"Name is required\")\n", "path": "users/forms/profile.py"}, {"content": "from uuid import uuid4\n\nfrom django.db import models\nfrom slugify import slugify\n\nfrom common.data.colors import COOL_COLORS\nfrom common.data.expertise import EXPERTISE\nfrom users.models.user import User\n\n\nclass UserExpertise(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid4, editable=False)\n user = models.ForeignKey(User, related_name=\"expertise\", on_delete=models.CASCADE)\n expertise = models.CharField(max_length=32, null=False, db_index=True)\n name = models.CharField(max_length=64, null=False)\n value = models.IntegerField(default=0, null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = \"user_expertise\"\n unique_together = [[\"expertise\", \"user\"]]\n ordering = [\"created_at\"]\n\n def save(self, *args, **kwargs):\n pre_defined_expertise = dict(sum([e[1] for e in EXPERTISE], [])) # flatten nested items\n\n if not self.name:\n self.name = pre_defined_expertise.get(self.expertise) or self.expertise\n\n if self.expertise not in pre_defined_expertise:\n self.expertise = slugify(self.expertise.lower())[:32]\n\n return super().save(*args, **kwargs)\n\n @property\n def color(self):\n return COOL_COLORS[hash(self.name) % len(COOL_COLORS)]\n", "path": "users/models/expertise.py"}], "after_files": [{"content": "from itertools import chain\n\nEXPERTISE = [\n (\"\u0425\u0430\u0440\u0434-\u0441\u043a\u0438\u043b\u043b\u044b\", [\n (\"frontend\", \"\u0424\u0440\u043e\u043d\u0442\u0435\u043d\u0434\"),\n (\"backend\", \"\u0411\u0435\u043a\u0435\u043d\u0434\"),\n (\"mobile\", \"\u041c\u043e\u0431\u0438\u043b\u044c\u043d\u0430\u044f \u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430\"),\n (\"machine-learning\", \"\u041c\u0430\u0448\u0438\u043d\u043d\u043e\u0435 \u041e\u0431\u0443\u0447\u0435\u043d\u0438\u0435\"),\n (\"data\", \"\u0414\u0430\u043d\u043d\u044b\u0435 \u0438 \u0430\u043d\u0430\u043b\u0438\u0442\u0438\u043a\u0430\"),\n (\"infra\", \"\u0418\u043d\u0444\u0440\u0430\u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\"),\n (\"crypto\", \"\u041a\u0440\u0438\u043f\u0442\u0430\"),\n (\"qa\", \"QA\"),\n (\"devops\", \"DevOps\"),\n (\"hardware\", \"\u0425\u0430\u0440\u0434\u0432\u0430\u0440\u044c\"),\n (\"imaging\", \"\u041a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440\u043d\u043e\u0435 \u0437\u0440\u0435\u043d\u0438\u0435\"),\n (\"nlp\", \"NLP\"),\n (\"iot\", \"IoT\"),\n (\"ux\", \"UX/UI\"),\n (\"pm\", \"\u041f\u0440\u043e\u0434\u0430\u043a\u0442-\u043c\u0435\u043d\u0435\u0434\u0436\u043c\u0435\u043d\u0442\"),\n (\"security\", \"\u0411\u0435\u0437\u043e\u043f\u0430\u0441\u043d\u043e\u0441\u0442\u044c\"),\n (\"marketing\", \"\u041c\u0430\u0440\u043a\u0435\u0442\u0438\u043d\u0433\"),\n (\"video\", \"\u0412\u0438\u0434\u0435\u043e-\u043f\u0440\u043e\u0434\u0430\u043a\u0448\u0435\u043d\"),\n (\"audio\", \"\u0410\u0443\u0434\u0438\u043e\"),\n (\"copywriting\", \"\u041a\u043e\u043f\u0438\u0440\u0430\u0439\u0442\u0438\u043d\u0433\"),\n (\"design\", \"\u0414\u0438\u0437\u0430\u0439\u043d\"),\n (\"science\", \"\u041d\u0430\u0443\u043a\u0430\"),\n (\"business\", \"\u0411\u0438\u0437\u043d\u0435\u0441\"),\n ]),\n (\"\u0421\u043e\u0444\u0442-\u0441\u043a\u0438\u043b\u043b\u044b\", [\n (\"hire\", \"\u041d\u0430\u0439\u043c \u043b\u044e\u0434\u0435\u0439\"),\n (\"lead\", 
\"\u0423\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043a\u043e\u043c\u0430\u043d\u0434\u0430\u043c\u0438\"),\n (\"critical\", \"\u041a\u0440\u0438\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u043c\u044b\u0448\u043b\u0435\u043d\u0438\u0435\"),\n (\"rationality\", \"\u0420\u0430\u0446\u0438\u043e\u043d\u0430\u043b\u044c\u043d\u043e\u0441\u0442\u044c\"),\n (\"conflicts\", \"\u0420\u0435\u0448\u0435\u043d\u0438\u0435 \u043a\u043e\u043d\u0444\u043b\u0438\u043a\u0442\u043e\u0432\"),\n (\"coaching\", \"\u041c\u0435\u043d\u0442\u043e\u0440\u0438\u043d\u0433\"),\n (\"public-speaking\", \"\u041f\u0443\u0431\u043b\u0438\u0447\u043d\u044b\u0435 \u0432\u044b\u0441\u0442\u0443\u043f\u043b\u0435\u043d\u0438\u044f\"),\n (\"planning\", \"\u041f\u043b\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435\"),\n (\"ethics\", \"\u042d\u0442\u0438\u043a\u0430\"),\n ]),\n (\"\u042f\u0437\u044b\u043a\u0438\", [\n (\"python\", \"Python\"),\n (\"java\", \"Java\"),\n (\"javascript\", \"JavaScript\"),\n (\"go\", \"Go\"),\n (\"php\", \"PHP\"),\n (\"ruby\", \"Ruby\"),\n (\"swift\", \"Swift\"),\n (\"cplus\", \"C/C++\"),\n (\"csharp\", \"C#\"),\n ])\n]\n\n\nEXPERTISE_FLAT_MAP = dict(chain.from_iterable(items for _, items in EXPERTISE))\n", "path": "common/data/expertise.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\n\nfrom common.data.countries import COUNTRIES\nfrom common.data.expertise import EXPERTISE\nfrom users.models.user import User\nfrom users.models.expertise import UserExpertise\nfrom common.forms import ImageUploadField\n\n\nclass ProfileEditForm(ModelForm):\n full_name = forms.CharField(\n label=\"\u0418\u043c\u044f \u0438 \u0444\u0430\u043c\u0438\u043b\u0438\u044f\",\n required=True,\n max_length=128\n )\n avatar = ImageUploadField(\n label=\"\u0410\u0432\u0430\u0442\u0430\u0440\",\n required=False,\n resize=(512, 512),\n convert_to=\"jpg\",\n )\n city = forms.CharField(\n label=\"\u0433\u043e\u0440\u043e\u0434\",\n required=True,\n max_length=120\n )\n country = forms.ChoiceField(\n label=\"\u0421\u0442\u0440\u0430\u043d\u0430\",\n choices=COUNTRIES,\n required=True\n )\n bio = forms.CharField(\n label=\"\u0421\u0441\u044b\u043b\u043e\u0447\u043a\u0438 \u043d\u0430 \u0441\u0435\u0431\u044f \u0438 \u0432\u0441\u044f\u043a\u043e\u0435 \u0442\u0430\u043a\u043e\u0435\",\n required=False,\n max_length=1024,\n widget=forms.Textarea(attrs={\"maxlength\": 1024}),\n )\n company = forms.CharField(\n label=\"\u041a\u043e\u043c\u043f\u0430\u043d\u0438\u044f\",\n required=True,\n max_length=128\n )\n position = forms.CharField(\n label=\"\u0414\u043e\u043b\u0436\u043d\u043e\u0441\u0442\u044c \u0438\u043b\u0438 \u0447\u0442\u043e \u0432\u044b \u0434\u0435\u043b\u0430\u0435\u0442\u0435\",\n required=True,\n max_length=128\n )\n contact = forms.CharField(\n label=\"\u041a\u043e\u043d\u0442\u0430\u043a\u0442 \u0434\u043b\u044f \u0441\u0432\u044f\u0437\u0438\",\n required=True,\n max_length=256,\n )\n\n class Meta:\n model = User\n fields = [\n \"full_name\",\n \"avatar\",\n \"company\",\n \"position\",\n \"city\",\n \"country\",\n \"bio\",\n \"contact\",\n ]\n\n\nclass NotificationsEditForm(ModelForm):\n email_digest_type = forms.ChoiceField(\n label=\"\u0422\u0438\u043f email-\u0434\u0430\u0439\u0434\u0436\u0435\u0441\u0442\u0430\",\n required=True,\n choices=User.EMAIL_DIGEST_TYPES,\n initial=User.EMAIL_DIGEST_TYPE_WEEKLY,\n widget=forms.RadioSelect(),\n )\n\n class Meta:\n model = User\n fields = [\n 
\"email_digest_type\",\n ]\n\n\nclass ExpertiseForm(ModelForm):\n expertise = forms.ChoiceField(\n label=\"\u041e\u0431\u043b\u0430\u0441\u0442\u044c\",\n required=True,\n choices=EXPERTISE + [(\"custom\", \"[\u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0441\u0432\u043e\u0451]\")],\n )\n expertise_custom = forms.CharField(\n label=\"\u0421\u0432\u043e\u0439 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\",\n required=False,\n max_length=32\n )\n value = forms.IntegerField(\n label=\"\u0421\u043a\u0438\u043b\u043b\",\n min_value=0,\n max_value=100,\n required=True,\n widget=forms.NumberInput(attrs={\"type\": \"range\", \"step\": \"1\"}),\n )\n\n class Meta:\n model = UserExpertise\n fields = [\"expertise\", \"value\"]\n\n def clean(self):\n super().clean()\n custom_expertise = self.cleaned_data.get(\"expertise_custom\")\n if custom_expertise:\n self.cleaned_data[\"expertise\"] = UserExpertise.make_custom_expertise_slug(custom_expertise)\n\n if not self.cleaned_data[\"expertise\"]:\n raise ValidationError(\"Name is required\")\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n custom_expertise = self.cleaned_data.get(\"expertise_custom\")\n if custom_expertise:\n instance.name = custom_expertise\n return instance\n\n", "path": "users/forms/profile.py"}, {"content": "from uuid import uuid4\n\nfrom django.db import models\nfrom slugify import slugify\n\nfrom common.data.colors import COOL_COLORS\nfrom common.data.expertise import EXPERTISE_FLAT_MAP\nfrom users.models.user import User\n\n\nclass UserExpertise(models.Model):\n EXPERTISE_SLUG_LENGTH = 32\n\n id = models.UUIDField(primary_key=True, default=uuid4, editable=False)\n user = models.ForeignKey(User, related_name=\"expertise\", on_delete=models.CASCADE)\n expertise = models.CharField(max_length=EXPERTISE_SLUG_LENGTH, null=False, db_index=True)\n name = models.CharField(max_length=64, null=False)\n value = models.IntegerField(default=0, null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = \"user_expertise\"\n unique_together = [[\"expertise\", \"user\"]]\n ordering = [\"created_at\"]\n\n def save(self, *args, **kwargs):\n\n if not self.name:\n self.name = EXPERTISE_FLAT_MAP.get(self.expertise) or self.expertise\n\n if self.expertise not in EXPERTISE_FLAT_MAP:\n self.expertise = self.make_custom_expertise_slug(self.expertise)\n\n return super().save(*args, **kwargs)\n\n @property\n def color(self):\n return COOL_COLORS[hash(self.name) % len(COOL_COLORS)]\n\n @classmethod\n def make_custom_expertise_slug(cls, expertise_name: str):\n return slugify(expertise_name.lower())[:cls.EXPERTISE_SLUG_LENGTH]\n", "path": "users/models/expertise.py"}]}
| 2,263 | 813 |
gh_patches_debug_2492
|
rasdani/github-patches
|
git_diff
|
cobbler__cobbler-3292
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cobbler modules don't load properly
### Describe the bug
Introduced in https://github.com/cobbler/cobbler/commit/2477c78094af7ba44ecbe350294c775296d96560
### Steps to reproduce
1. Import any Cobbler Module
2. See import error
### Expected behavior
Bug not present
### Cobbler version
````paste below
````
### Operating system
openSUSE Tumbleweed
### Cobbler log
````paste below
````
### Screenshots
None
### Additional information
None
--- END ISSUE ---
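For orientation, here is a minimal sketch of the skip check at the heart of this report, run against hypothetical relative paths of the kind the recursive glob in `load_modules()` produces; the concrete paths are made up for illustration, and the substring variant is the one the patch below adopts:
```python
# Hypothetical relative paths as left over after stripping the module directory prefix.
candidates = [
    "/__init__.py",              # top-level package marker (leading slash not yet stripped)
    "/installer/__init__.py",    # nested package marker found by the recursive glob
    "/web/__pycache__/x.pyc",    # nested bytecode cache file
]

for basename in candidates:
    exact = basename in ("__pycache__", "__init__.py")               # False for all three
    loose = "__pycache__" in basename or "__init__.py" in basename   # True for all three
    print(basename, "exact:", exact, "substring:", loose)
```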
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/module_loader.py`
Content:
```
1 """
2 Module loader, adapted for Cobbler usage
3 """
4
5 # SPDX-License-Identifier: GPL-2.0-or-later
6 # SPDX-FileCopyrightText: Copyright 2006-2009, Red Hat, Inc and Others
7 # SPDX-FileCopyrightText: Adrian Likins <[email protected]>
8 # SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
9
10 import logging
11 from importlib import import_module
12
13 import glob
14 import os
15 from typing import Optional, Dict, Any
16
17 from cobbler.cexceptions import CX
18 from cobbler.utils import log_exc
19
20 # add cobbler/modules to python path
21 import cobbler
22
23
24 class ModuleLoader:
25 """
26 Class for dynamically loading Cobbler Plugins on startup
27 """
28
29 def __init__(self, api, module_path: str = ""):
30 """
31 Constructor to initialize the ModuleLoader class.
32
33 :param api: CobblerAPI
34 :param module_path: The path which should be considered as the root module path. If this an empty string, try to
35 auto-detect the path.
36 """
37 self.logger = logging.getLogger()
38 self.mod_path = os.path.join(
39 os.path.abspath(os.path.dirname(cobbler.__file__)), "modules"
40 )
41 if module_path:
42 self.mod_path = module_path
43 self.module_cache: Dict[str, Any] = {}
44 self.modules_by_category: Dict[str, Dict[str, Any]] = {}
45 self.api = api
46
47 def load_modules(self):
48 """
49 Load the modules from the path handed to the function into Cobbler.
50
51 :return: Two dictionary's with the dynamically loaded modules.
52 """
53
54 filenames = glob.glob(f"{self.mod_path}/*.py")
55 filenames += glob.glob(f"{self.mod_path}/*.pyc")
56 filenames += glob.glob(f"{self.mod_path}/*.pyo")
57 # Allow recursive modules
58 filenames += glob.glob(f"{self.mod_path}/**/*.py")
59 filenames += glob.glob(f"{self.mod_path}/**/*.pyc")
60 filenames += glob.glob(f"{self.mod_path}/**/*.pyo")
61
62 for filename in filenames:
63 basename = filename.replace(self.mod_path, "")
64 modname = ""
65
66 if basename in ("__pycache__", "__init__.py"):
67 continue
68
69 if basename[0] == "/":
70 basename = basename[1:]
71
72 basename = basename.replace("/", ".")
73
74 if basename[-3:] == ".py":
75 modname = basename[:-3]
76 elif basename[-4:] in [".pyc", ".pyo"]:
77 modname = basename[:-4]
78
79 self.__import_module(modname)
80
81 return self.module_cache, self.modules_by_category
82
83 def __import_module(self, modname: str):
84 """
85 Import a module which is not part of the core functionality of Cobbler.
86
87 :param modname: The name of the module.
88 """
89 try:
90 blip = import_module(f"cobbler.modules.{modname}")
91 if not hasattr(blip, "register"):
92 self.logger.debug(
93 "%s.%s is not a proper module", self.mod_path, modname
94 )
95 return None
96 category = blip.register()
97 if category:
98 self.module_cache[modname] = blip
99 if category not in self.modules_by_category:
100 self.modules_by_category[category] = {}
101 self.modules_by_category[category][modname] = blip
102 except Exception:
103 self.logger.info("Exception raised when loading module %s", modname)
104 log_exc()
105
106 def get_module_by_name(self, name: str):
107 """
108 Get a module by its name. The category of the module is not needed.
109
110 :param name: The name of the module.
111 :return: The module asked by the function parameter.
112 """
113 return self.module_cache.get(name, None)
114
115 def get_module_name(
116 self, category: str, field: str, fallback_module_name: Optional[str] = None
117 ) -> str:
118 """
119 Get module name from the settings.
120
121 :param category: Field category in configuration file.
122 :param field: Field in configuration file
123 :param fallback_module_name: Default value used if category/field is not found in configuration file
124 :raises FileNotFoundError: If unable to find configuration file.
125 :raises ValueError: If the category does not exist or the field is empty.
126 :raises CX: If the field could not be read and no fallback_module_name was given.
127 :returns: The name of the module.
128 """
129 # FIXME: We can't enabled this check since it is to strict atm.
130 # if category not in MODULES_BY_CATEGORY:
131 # raise ValueError("category must be one of: %s" % MODULES_BY_CATEGORY.keys())
132
133 if field.isspace():
134 raise ValueError('field cannot be empty. Did you mean "module" maybe?')
135
136 try:
137 value = self.api.settings().modules.get(category, {}).get("module")
138 if value is None:
139 raise ModuleNotFoundError("Requested module could not be retrieved")
140 except Exception as exception:
141 if fallback_module_name is None:
142 raise CX(
143 f"Cannot find config file setting for: {category}.{field}"
144 ) from exception
145 value = fallback_module_name
146 self.logger.warning(
147 'Requested module "%s.%s" not found. Using fallback module: "%s"',
148 category,
149 field,
150 value,
151 )
152 return value
153
154 def get_module_from_file(
155 self, category: str, field: str, fallback_module_name: Optional[str] = None
156 ):
157 """
158 Get Python module, based on name defined in configuration file
159
160 :param category: field category in configuration file
161 :param field: field in configuration file
162 :param fallback_module_name: default value used if category/field is not found in configuration file
163 :raises CX: If unable to load Python module
164 :returns: A Python module.
165 """
166
167 module_name = self.get_module_name(category, field, fallback_module_name)
168 requested_module = self.module_cache.get(module_name, None)
169 if requested_module is None:
170 raise CX(f"Failed to load module for {category}.{field}")
171 return requested_module
172
173 def get_modules_in_category(self, category: str) -> list:
174 """
175 Return all modules of a module category.
176
177 :param category: The module category.
178 :return: A list of all modules of that category. Returns an empty list if the Category does not exist.
179 """
180 if category not in self.modules_by_category:
181 # FIXME: We can't enabled this check since it is to strict atm.
182 # raise ValueError("category must be one of: %s" % MODULES_BY_CATEGORY.keys())
183 return []
184 return list(self.modules_by_category[category].values())
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cobbler/module_loader.py b/cobbler/module_loader.py
--- a/cobbler/module_loader.py
+++ b/cobbler/module_loader.py
@@ -63,7 +63,7 @@
basename = filename.replace(self.mod_path, "")
modname = ""
- if basename in ("__pycache__", "__init__.py"):
+ if "__pycache__" in basename or "__init__.py" in basename:
continue
if basename[0] == "/":
|
{"golden_diff": "diff --git a/cobbler/module_loader.py b/cobbler/module_loader.py\n--- a/cobbler/module_loader.py\n+++ b/cobbler/module_loader.py\n@@ -63,7 +63,7 @@\n basename = filename.replace(self.mod_path, \"\")\n modname = \"\"\n \n- if basename in (\"__pycache__\", \"__init__.py\"):\n+ if \"__pycache__\" in basename or \"__init__.py\" in basename:\n continue\n \n if basename[0] == \"/\":\n", "issue": "Cobbler modules don't load properly\n### Describe the bug\r\n\r\nIntroduced in https://github.com/cobbler/cobbler/commit/2477c78094af7ba44ecbe350294c775296d96560\r\n\r\n### Steps to reproduce\r\n\r\n1. Import any Cobbler Module\r\n2. See import error\r\n\r\n### Expected behavior\r\n\r\nBug not present\r\n\r\n### Cobbler version\r\n\r\n````paste below\r\n````\r\n\r\n### Operating system\r\n\r\nopenSUSE Tumbleweed\r\n\r\n### Cobbler log\r\n\r\n````paste below\r\n````\r\n\r\n### Screenshots\r\n\r\nNone\r\n\r\n### Additional information\r\n\r\nNone\r\n\n", "before_files": [{"content": "\"\"\"\nModule loader, adapted for Cobbler usage\n\"\"\"\n\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2006-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Adrian Likins <[email protected]>\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\nimport logging\nfrom importlib import import_module\n\nimport glob\nimport os\nfrom typing import Optional, Dict, Any\n\nfrom cobbler.cexceptions import CX\nfrom cobbler.utils import log_exc\n\n# add cobbler/modules to python path\nimport cobbler\n\n\nclass ModuleLoader:\n \"\"\"\n Class for dynamically loading Cobbler Plugins on startup\n \"\"\"\n\n def __init__(self, api, module_path: str = \"\"):\n \"\"\"\n Constructor to initialize the ModuleLoader class.\n\n :param api: CobblerAPI\n :param module_path: The path which should be considered as the root module path. 
If this an empty string, try to\n auto-detect the path.\n \"\"\"\n self.logger = logging.getLogger()\n self.mod_path = os.path.join(\n os.path.abspath(os.path.dirname(cobbler.__file__)), \"modules\"\n )\n if module_path:\n self.mod_path = module_path\n self.module_cache: Dict[str, Any] = {}\n self.modules_by_category: Dict[str, Dict[str, Any]] = {}\n self.api = api\n\n def load_modules(self):\n \"\"\"\n Load the modules from the path handed to the function into Cobbler.\n\n :return: Two dictionary's with the dynamically loaded modules.\n \"\"\"\n\n filenames = glob.glob(f\"{self.mod_path}/*.py\")\n filenames += glob.glob(f\"{self.mod_path}/*.pyc\")\n filenames += glob.glob(f\"{self.mod_path}/*.pyo\")\n # Allow recursive modules\n filenames += glob.glob(f\"{self.mod_path}/**/*.py\")\n filenames += glob.glob(f\"{self.mod_path}/**/*.pyc\")\n filenames += glob.glob(f\"{self.mod_path}/**/*.pyo\")\n\n for filename in filenames:\n basename = filename.replace(self.mod_path, \"\")\n modname = \"\"\n\n if basename in (\"__pycache__\", \"__init__.py\"):\n continue\n\n if basename[0] == \"/\":\n basename = basename[1:]\n\n basename = basename.replace(\"/\", \".\")\n\n if basename[-3:] == \".py\":\n modname = basename[:-3]\n elif basename[-4:] in [\".pyc\", \".pyo\"]:\n modname = basename[:-4]\n\n self.__import_module(modname)\n\n return self.module_cache, self.modules_by_category\n\n def __import_module(self, modname: str):\n \"\"\"\n Import a module which is not part of the core functionality of Cobbler.\n\n :param modname: The name of the module.\n \"\"\"\n try:\n blip = import_module(f\"cobbler.modules.{modname}\")\n if not hasattr(blip, \"register\"):\n self.logger.debug(\n \"%s.%s is not a proper module\", self.mod_path, modname\n )\n return None\n category = blip.register()\n if category:\n self.module_cache[modname] = blip\n if category not in self.modules_by_category:\n self.modules_by_category[category] = {}\n self.modules_by_category[category][modname] = blip\n except Exception:\n self.logger.info(\"Exception raised when loading module %s\", modname)\n log_exc()\n\n def get_module_by_name(self, name: str):\n \"\"\"\n Get a module by its name. The category of the module is not needed.\n\n :param name: The name of the module.\n :return: The module asked by the function parameter.\n \"\"\"\n return self.module_cache.get(name, None)\n\n def get_module_name(\n self, category: str, field: str, fallback_module_name: Optional[str] = None\n ) -> str:\n \"\"\"\n Get module name from the settings.\n\n :param category: Field category in configuration file.\n :param field: Field in configuration file\n :param fallback_module_name: Default value used if category/field is not found in configuration file\n :raises FileNotFoundError: If unable to find configuration file.\n :raises ValueError: If the category does not exist or the field is empty.\n :raises CX: If the field could not be read and no fallback_module_name was given.\n :returns: The name of the module.\n \"\"\"\n # FIXME: We can't enabled this check since it is to strict atm.\n # if category not in MODULES_BY_CATEGORY:\n # raise ValueError(\"category must be one of: %s\" % MODULES_BY_CATEGORY.keys())\n\n if field.isspace():\n raise ValueError('field cannot be empty. 
Did you mean \"module\" maybe?')\n\n try:\n value = self.api.settings().modules.get(category, {}).get(\"module\")\n if value is None:\n raise ModuleNotFoundError(\"Requested module could not be retrieved\")\n except Exception as exception:\n if fallback_module_name is None:\n raise CX(\n f\"Cannot find config file setting for: {category}.{field}\"\n ) from exception\n value = fallback_module_name\n self.logger.warning(\n 'Requested module \"%s.%s\" not found. Using fallback module: \"%s\"',\n category,\n field,\n value,\n )\n return value\n\n def get_module_from_file(\n self, category: str, field: str, fallback_module_name: Optional[str] = None\n ):\n \"\"\"\n Get Python module, based on name defined in configuration file\n\n :param category: field category in configuration file\n :param field: field in configuration file\n :param fallback_module_name: default value used if category/field is not found in configuration file\n :raises CX: If unable to load Python module\n :returns: A Python module.\n \"\"\"\n\n module_name = self.get_module_name(category, field, fallback_module_name)\n requested_module = self.module_cache.get(module_name, None)\n if requested_module is None:\n raise CX(f\"Failed to load module for {category}.{field}\")\n return requested_module\n\n def get_modules_in_category(self, category: str) -> list:\n \"\"\"\n Return all modules of a module category.\n\n :param category: The module category.\n :return: A list of all modules of that category. Returns an empty list if the Category does not exist.\n \"\"\"\n if category not in self.modules_by_category:\n # FIXME: We can't enabled this check since it is to strict atm.\n # raise ValueError(\"category must be one of: %s\" % MODULES_BY_CATEGORY.keys())\n return []\n return list(self.modules_by_category[category].values())\n", "path": "cobbler/module_loader.py"}], "after_files": [{"content": "\"\"\"\nModule loader, adapted for Cobbler usage\n\"\"\"\n\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2006-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Adrian Likins <[email protected]>\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\nimport logging\nfrom importlib import import_module\n\nimport glob\nimport os\nfrom typing import Optional, Dict, Any\n\nfrom cobbler.cexceptions import CX\nfrom cobbler.utils import log_exc\n\n# add cobbler/modules to python path\nimport cobbler\n\n\nclass ModuleLoader:\n \"\"\"\n Class for dynamically loading Cobbler Plugins on startup\n \"\"\"\n\n def __init__(self, api, module_path: str = \"\"):\n \"\"\"\n Constructor to initialize the ModuleLoader class.\n\n :param api: CobblerAPI\n :param module_path: The path which should be considered as the root module path. 
If this an empty string, try to\n auto-detect the path.\n \"\"\"\n self.logger = logging.getLogger()\n self.mod_path = os.path.join(\n os.path.abspath(os.path.dirname(cobbler.__file__)), \"modules\"\n )\n if module_path:\n self.mod_path = module_path\n self.module_cache: Dict[str, Any] = {}\n self.modules_by_category: Dict[str, Dict[str, Any]] = {}\n self.api = api\n\n def load_modules(self):\n \"\"\"\n Load the modules from the path handed to the function into Cobbler.\n\n :return: Two dictionary's with the dynamically loaded modules.\n \"\"\"\n\n filenames = glob.glob(f\"{self.mod_path}/*.py\")\n filenames += glob.glob(f\"{self.mod_path}/*.pyc\")\n filenames += glob.glob(f\"{self.mod_path}/*.pyo\")\n # Allow recursive modules\n filenames += glob.glob(f\"{self.mod_path}/**/*.py\")\n filenames += glob.glob(f\"{self.mod_path}/**/*.pyc\")\n filenames += glob.glob(f\"{self.mod_path}/**/*.pyo\")\n\n for filename in filenames:\n basename = filename.replace(self.mod_path, \"\")\n modname = \"\"\n\n if \"__pycache__\" in basename or \"__init__.py\" in basename:\n continue\n\n if basename[0] == \"/\":\n basename = basename[1:]\n\n basename = basename.replace(\"/\", \".\")\n\n if basename[-3:] == \".py\":\n modname = basename[:-3]\n elif basename[-4:] in [\".pyc\", \".pyo\"]:\n modname = basename[:-4]\n\n self.__import_module(modname)\n\n return self.module_cache, self.modules_by_category\n\n def __import_module(self, modname: str):\n \"\"\"\n Import a module which is not part of the core functionality of Cobbler.\n\n :param modname: The name of the module.\n \"\"\"\n try:\n blip = import_module(f\"cobbler.modules.{modname}\")\n if not hasattr(blip, \"register\"):\n self.logger.debug(\n \"%s.%s is not a proper module\", self.mod_path, modname\n )\n return None\n category = blip.register()\n if category:\n self.module_cache[modname] = blip\n if category not in self.modules_by_category:\n self.modules_by_category[category] = {}\n self.modules_by_category[category][modname] = blip\n except Exception:\n self.logger.info(\"Exception raised when loading module %s\", modname)\n log_exc()\n\n def get_module_by_name(self, name: str):\n \"\"\"\n Get a module by its name. The category of the module is not needed.\n\n :param name: The name of the module.\n :return: The module asked by the function parameter.\n \"\"\"\n return self.module_cache.get(name, None)\n\n def get_module_name(\n self, category: str, field: str, fallback_module_name: Optional[str] = None\n ) -> str:\n \"\"\"\n Get module name from the settings.\n\n :param category: Field category in configuration file.\n :param field: Field in configuration file\n :param fallback_module_name: Default value used if category/field is not found in configuration file\n :raises FileNotFoundError: If unable to find configuration file.\n :raises ValueError: If the category does not exist or the field is empty.\n :raises CX: If the field could not be read and no fallback_module_name was given.\n :returns: The name of the module.\n \"\"\"\n # FIXME: We can't enabled this check since it is to strict atm.\n # if category not in MODULES_BY_CATEGORY:\n # raise ValueError(\"category must be one of: %s\" % MODULES_BY_CATEGORY.keys())\n\n if field.isspace():\n raise ValueError('field cannot be empty. 
Did you mean \"module\" maybe?')\n\n try:\n value = self.api.settings().modules.get(category, {}).get(\"module\")\n if value is None:\n raise ModuleNotFoundError(\"Requested module could not be retrieved\")\n except Exception as exception:\n if fallback_module_name is None:\n raise CX(\n f\"Cannot find config file setting for: {category}.{field}\"\n ) from exception\n value = fallback_module_name\n self.logger.warning(\n 'Requested module \"%s.%s\" not found. Using fallback module: \"%s\"',\n category,\n field,\n value,\n )\n return value\n\n def get_module_from_file(\n self, category: str, field: str, fallback_module_name: Optional[str] = None\n ):\n \"\"\"\n Get Python module, based on name defined in configuration file\n\n :param category: field category in configuration file\n :param field: field in configuration file\n :param fallback_module_name: default value used if category/field is not found in configuration file\n :raises CX: If unable to load Python module\n :returns: A Python module.\n \"\"\"\n\n module_name = self.get_module_name(category, field, fallback_module_name)\n requested_module = self.module_cache.get(module_name, None)\n if requested_module is None:\n raise CX(f\"Failed to load module for {category}.{field}\")\n return requested_module\n\n def get_modules_in_category(self, category: str) -> list:\n \"\"\"\n Return all modules of a module category.\n\n :param category: The module category.\n :return: A list of all modules of that category. Returns an empty list if the Category does not exist.\n \"\"\"\n if category not in self.modules_by_category:\n # FIXME: We can't enabled this check since it is to strict atm.\n # raise ValueError(\"category must be one of: %s\" % MODULES_BY_CATEGORY.keys())\n return []\n return list(self.modules_by_category[category].values())\n", "path": "cobbler/module_loader.py"}]}
| 2,336 | 111 |
gh_patches_debug_20789
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-247
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pin black to specific version
New releases of `black` can change the way the code is formatted ("blackened"), causing the CI checks to fail. We should thus use a deterministic version of `black`.
--- END ISSUE ---
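As a minimal sketch of the intended direction (install a pinned formatter so the check is deterministic), assuming the `black==19.10b0` pin that the accompanying patch introduces; any fixed release would serve equally well, and the path list here is an illustrative subset:
```python
import nox

BLACK_VERSION = "black==19.10b0"  # fixed release, so a new black version cannot silently change formatting
BLACK_PATHS = ("noxfile.py", "setup.py")  # illustrative subset of the real path tuple

@nox.session(python="3.8")
def lint(session):
    # Installing the pinned formatter keeps the --check run reproducible across CI runs.
    session.install("flake8", BLACK_VERSION)
    session.run("black", "--check", *BLACK_PATHS)
```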
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import os
18 import shutil
19
20 import nox
21
22
23 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
24
25
26 def default(session):
27 """Default unit test session.
28
29 This is intended to be run **without** an interpreter set, so
30 that the current ``python`` (on the ``PATH``) or the version of
31 Python corresponding to the ``nox`` binary the ``PATH`` can
32 run the tests.
33 """
34 # Install all test dependencies, then install local packages in-place.
35 session.install(
36 "mock", "pytest", "google-cloud-testutils", "pytest-cov", "freezegun"
37 )
38 session.install("grpcio")
39
40 # fastparquet is not included in .[all] because, in general, it's redundant
41 # with pyarrow. We still want to run some unit tests with fastparquet
42 # serialization, though.
43 session.install("-e", ".[all,fastparquet]")
44
45 # IPython does not support Python 2 after version 5.x
46 if session.python == "2.7":
47 session.install("ipython==5.5")
48 else:
49 session.install("ipython")
50
51 # opentelemetry was not added to [all] because opentelemetry does not support Python 2.
52 # Exporter does not need to be in nox thus it has been added to README documentation
53 if session.python != "2.7":
54 session.install("-e", ".[opentelemetry]")
55
56 # Run py.test against the unit tests.
57 session.run(
58 "py.test",
59 "--quiet",
60 "--cov=google.cloud.bigquery",
61 "--cov=tests.unit",
62 "--cov-append",
63 "--cov-config=.coveragerc",
64 "--cov-report=",
65 "--cov-fail-under=0",
66 os.path.join("tests", "unit"),
67 *session.posargs,
68 )
69
70
71 @nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
72 def unit(session):
73 """Run the unit test suite."""
74 default(session)
75
76
77 @nox.session(python=["2.7", "3.8"])
78 def system(session):
79 """Run the system test suite."""
80
81 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
82 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
83 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
84
85 # Sanity check: Only run system tests if the environment variable is set.
86 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
87 session.skip("Credentials must be set via environment variable.")
88
89 # Use pre-release gRPC for system tests.
90 session.install("--pre", "grpcio")
91
92 # Install all test dependencies, then install local packages in place.
93 session.install("mock", "pytest", "psutil", "google-cloud-testutils")
94 session.install("google-cloud-storage")
95 session.install("-e", ".[all]")
96
97 # IPython does not support Python 2 after version 5.x
98 if session.python == "2.7":
99 session.install("ipython==5.5")
100 else:
101 session.install("ipython")
102
103 # Run py.test against the system tests.
104 session.run(
105 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
106 )
107
108
109 @nox.session(python=["2.7", "3.8"])
110 def snippets(session):
111 """Run the snippets test suite."""
112
113 # Sanity check: Only run snippets tests if the environment variable is set.
114 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
115 session.skip("Credentials must be set via environment variable.")
116
117 # Install all test dependencies, then install local packages in place.
118 session.install("mock", "pytest", "google-cloud-testutils")
119 session.install("google-cloud-storage")
120 session.install("grpcio")
121 session.install("-e", ".[all]")
122
123 # Run py.test against the snippets tests.
124 # Skip tests in samples/snippets, as those are run in a different session
125 # using the nox config from that directory.
126 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
127 session.run("py.test", "samples", "--ignore=samples/snippets", *session.posargs)
128
129
130 @nox.session(python="3.8")
131 def cover(session):
132 """Run the final coverage report.
133
134 This outputs the coverage report aggregating coverage from the unit
135 test runs (not system test runs), and then erases coverage data.
136 """
137 session.install("coverage", "pytest-cov")
138 session.run("coverage", "report", "--show-missing", "--fail-under=100")
139 session.run("coverage", "erase")
140
141
142 @nox.session(python="3.8")
143 def lint(session):
144 """Run linters.
145
146 Returns a failure if the linters find linting errors or sufficiently
147 serious code quality issues.
148 """
149
150 session.install("black", "flake8")
151 session.install("-e", ".")
152 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
153 session.run("flake8", "tests")
154 session.run("flake8", os.path.join("docs", "samples"))
155 session.run("flake8", os.path.join("docs", "snippets.py"))
156 session.run("black", "--check", *BLACK_PATHS)
157
158
159 @nox.session(python="3.8")
160 def lint_setup_py(session):
161 """Verify that setup.py is valid (including RST check)."""
162
163 session.install("docutils", "Pygments")
164 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
165
166
167 @nox.session(python="3.6")
168 def blacken(session):
169 """Run black.
170 Format code to uniform standard.
171
172 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
173 That run uses an image that doesn't have 3.6 installed. Before updating this
174 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
175 """
176 session.install("black")
177 session.run("black", *BLACK_PATHS)
178
179
180 @nox.session(python="3.8")
181 def docs(session):
182 """Build the docs."""
183
184 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
185 session.install("google-cloud-storage")
186 session.install("-e", ".[all]")
187
188 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
189 session.run(
190 "sphinx-build",
191 "-W", # warnings as errors
192 "-T", # show full traceback on exception
193 "-N", # no colors
194 "-b",
195 "html",
196 "-d",
197 os.path.join("docs", "_build", "doctrees", ""),
198 os.path.join("docs", ""),
199 os.path.join("docs", "_build", "html", ""),
200 )
201
202
203 @nox.session(python="3.8")
204 def docfx(session):
205 """Build the docfx yaml files for this library."""
206
207 session.install("-e", ".")
208 session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml")
209
210 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
211 session.run(
212 "sphinx-build",
213 "-T", # show full traceback on exception
214 "-N", # no colors
215 "-D",
216 (
217 "extensions=sphinx.ext.autodoc,"
218 "sphinx.ext.autosummary,"
219 "docfx_yaml.extension,"
220 "sphinx.ext.intersphinx,"
221 "sphinx.ext.coverage,"
222 "sphinx.ext.napoleon,"
223 "sphinx.ext.todo,"
224 "sphinx.ext.viewcode,"
225 "recommonmark"
226 ),
227 "-b",
228 "html",
229 "-d",
230 os.path.join("docs", "_build", "doctrees", ""),
231 os.path.join("docs", ""),
232 os.path.join("docs", "_build", "html", ""),
233 )
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -20,6 +20,7 @@
import nox
+BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
@@ -147,7 +148,7 @@
serious code quality issues.
"""
- session.install("black", "flake8")
+ session.install("flake8", BLACK_VERSION)
session.install("-e", ".")
session.run("flake8", os.path.join("google", "cloud", "bigquery"))
session.run("flake8", "tests")
@@ -173,7 +174,7 @@
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
- session.install("black")
+ session.install(BLACK_VERSION)
session.run("black", *BLACK_PATHS)
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -20,6 +20,7 @@\n import nox\n \n \n+BLACK_VERSION = \"black==19.10b0\"\n BLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n \n \n@@ -147,7 +148,7 @@\n serious code quality issues.\n \"\"\"\n \n- session.install(\"black\", \"flake8\")\n+ session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n@@ -173,7 +174,7 @@\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n- session.install(\"black\")\n+ session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n", "issue": "Pin black to specific version\nNew releases of `black` can change the way the code is formatted (\"blackened\"), causing the CI checks to fail. We should thus use a deterministic version of `black`.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\", \"pytest\", \"google-cloud-testutils\", \"pytest-cov\", \"freezegun\"\n )\n session.install(\"grpcio\")\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. 
We still want to run some unit tests with fastparquet\n # serialization, though.\n session.install(\"-e\", \".[all,fastparquet]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # opentelemetry was not added to [all] because opentelemetry does not support Python 2.\n # Exporter does not need to be in nox thus it has been added to README documentation\n if session.python != \"2.7\":\n session.install(\"-e\", \".[opentelemetry]\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", \"--ignore=samples/snippets\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email 
protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"3.8\")\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", 
\"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\", \"pytest\", \"google-cloud-testutils\", \"pytest-cov\", \"freezegun\"\n )\n session.install(\"grpcio\")\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n # serialization, though.\n session.install(\"-e\", \".[all,fastparquet]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # opentelemetry was not added to [all] because opentelemetry does not support Python 2.\n # Exporter does not need to be in nox thus it has been added to README documentation\n if session.python != \"2.7\":\n session.install(\"-e\", \".[opentelemetry]\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", \"--ignore=samples/snippets\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"3.8\")\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 2,842 | 253 |
gh_patches_debug_15588
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-920
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: "'FabricRemoteAction' object has no attribute 'id'"
Fabric based actions are failing due to an incorrectly named attribute.
root@st2stage201:/tmp# st2 execution get 5493450cce36d2111eb26c79
STATUS: failed
RESULT:
{
"message": "'FabricRemoteAction' object has no attribute 'id'",
"traceback": " File "/usr/lib/python2.7/dist-packages/st2actions/container/base.py", line 117, in _do_run
run_result = runner.run(action_params)
File "/usr/lib/python2.7/dist-packages/st2actions/runners/fabricrunner.py", line 106, in run
result = self._run(remote_action)
File "/usr/lib/python2.7/dist-packages/st2actions/runners/fabricrunner.py", line 120, in _run
remote_action.name, remote_action.id, remote_action.get_command(),
"
}
--- END ISSUE ---
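For orientation, a minimal stand-in that reproduces the error message, assuming (as the code below suggests) that the remote-action object stores its execution id under `action_exec_id` rather than `id`; the class and values here are purely illustrative:
```python
class RemoteActionStub:
    """Illustrative stand-in for FabricRemoteAction, modelling only the relevant attributes."""

    def __init__(self, name, action_exec_id):
        self.name = name
        self.action_exec_id = action_exec_id  # stored under this name; no `id` attribute is ever set

action = RemoteActionStub("uname", "5493450cce36d2111eb26c79")
print(action.name, action.action_exec_id)   # works
print(action.id)                            # AttributeError: 'RemoteActionStub' object has no attribute 'id'
```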
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2actions/st2actions/runners/fabricrunner.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import uuid
18
19 from fabric.api import (env, execute)
20 from oslo.config import cfg
21 import six
22
23 from st2actions.runners import ActionRunner
24 from st2actions.runners import ShellRunnerMixin
25 from st2common import log as logging
26 from st2common.exceptions.actionrunner import ActionRunnerPreRunError
27 from st2common.exceptions.fabricrunner import FabricExecutionFailureException
28 from st2common.constants.action import ACTIONEXEC_STATUS_SUCCEEDED, ACTIONEXEC_STATUS_FAILED
29 from st2common.models.system.action import (FabricRemoteAction, FabricRemoteScriptAction)
30
31 # Replace with container call to get logger.
32 LOG = logging.getLogger(__name__)
33
34 DEFAULT_ACTION_TIMEOUT = 60
35
36
37 # Fabric environment level settings.
38 # XXX: Note fabric env is a global singleton.
39 env.parallel = True # By default, execute things in parallel. Uses multiprocessing under the hood.
40 env.user = cfg.CONF.system_user.user
41 ssh_key_file = cfg.CONF.system_user.ssh_key_file
42
43 if ssh_key_file:
44 ssh_key_file = os.path.expanduser(ssh_key_file)
45
46 if ssh_key_file and os.path.exists(ssh_key_file):
47 env.key_filename = ssh_key_file
48
49 env.timeout = 10 # Timeout for connections (in seconds)
50 env.command_timeout = DEFAULT_ACTION_TIMEOUT # timeout for commands (in seconds)
51 env.combine_stderr = False
52 env.group = 'staff'
53 env.abort_exception = FabricExecutionFailureException
54
55 # constants to lookup in runner_parameters.
56 RUNNER_HOSTS = 'hosts'
57 RUNNER_PARALLEL = 'parallel'
58 RUNNER_SUDO = 'sudo'
59 RUNNER_ON_BEHALF_USER = 'user'
60 RUNNER_REMOTE_DIR = 'dir'
61 RUNNER_COMMAND = 'cmd'
62 RUNNER_KWARG_OP = 'kwarg_op'
63 RUNNER_TIMEOUT = 'timeout'
64
65
66 def get_runner():
67 return FabricRunner(str(uuid.uuid4()))
68
69
70 class FabricRunner(ActionRunner, ShellRunnerMixin):
71 def __init__(self, runner_id):
72 super(FabricRunner, self).__init__(runner_id=runner_id)
73 self._hosts = None
74 self._parallel = True
75 self._sudo = False
76 self._on_behalf_user = None
77 self._user = None
78 self._kwarg_op = '--'
79
80 def pre_run(self):
81 LOG.debug('Entering FabricRunner.pre_run() for actionexec_id="%s"',
82 self.action_execution_id)
83 LOG.debug(' runner_parameters = %s', self.runner_parameters)
84 hosts = self.runner_parameters.get(RUNNER_HOSTS, '').split(',')
85 self._hosts = [h.strip() for h in hosts if len(h) > 0]
86 if len(self._hosts) < 1:
87 raise ActionRunnerPreRunError('No hosts specified to run action for action %s.',
88 self.action_execution_id)
89 self._parallel = self.runner_parameters.get(RUNNER_PARALLEL, True)
90 self._sudo = self.runner_parameters.get(RUNNER_SUDO, False)
91 self._sudo = self._sudo if self._sudo else False
92 self._on_behalf_user = self.context.get(RUNNER_ON_BEHALF_USER, env.user)
93 self._user = cfg.CONF.system_user.user
94 self._kwarg_op = self.runner_parameters.get(RUNNER_KWARG_OP, '--')
95 self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT, DEFAULT_ACTION_TIMEOUT)
96
97 LOG.info('[FabricRunner="%s", actionexec_id="%s"] Finished pre_run.',
98 self.runner_id, self.action_execution_id)
99
100 def run(self, action_parameters):
101 LOG.debug(' action_parameters = %s', action_parameters)
102 remote_action = self._get_fabric_remote_action(action_parameters) \
103 if self.entry_point is None or len(self.entry_point) < 1 \
104 else self._get_fabric_remote_script_action(action_parameters)
105 LOG.debug('Will execute remote_action : %s.', str(remote_action))
106 result = self._run(remote_action)
107 LOG.debug('Executed remote_action : %s. Result is : %s.', remote_action, result)
108 self.container_service.report_status(FabricRunner._get_result_status(
109 result, cfg.CONF.ssh_runner.allow_partial_failure))
110 self.container_service.report_result(result)
111
112 # TODO (manas) : figure out the right boolean representation.
113 return result is not None
114
115 def _run(self, remote_action):
116 LOG.info('Executing action via FabricRunner :%s for user: %s.',
117 self.runner_id, remote_action.get_on_behalf_user())
118 LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '
119 'actual user: %s, sudo: %s'),
120 remote_action.name, remote_action.id, remote_action.get_command(),
121 remote_action.get_on_behalf_user(), remote_action.get_user(),
122 remote_action.is_sudo())
123 results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)
124 return results
125
126 def _get_fabric_remote_action(self, action_paramaters):
127 command = self.runner_parameters.get(RUNNER_COMMAND, None)
128 env_vars = self._get_env_vars()
129 return FabricRemoteAction(self.action_name,
130 str(self.action_execution_id),
131 command,
132 env_vars=env_vars,
133 on_behalf_user=self._on_behalf_user,
134 user=self._user,
135 hosts=self._hosts,
136 parallel=self._parallel,
137 sudo=self._sudo,
138 timeout=self._timeout)
139
140 def _get_fabric_remote_script_action(self, action_parameters):
141 script_local_path_abs = self.entry_point
142 pos_args, named_args = self._get_script_args(action_parameters)
143 named_args = self._transform_named_args(named_args)
144 env_vars = self._get_env_vars()
145 remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,
146 cfg.CONF.ssh_runner.remote_dir)
147 remote_dir = os.path.join(remote_dir, self.action_execution_id)
148 return FabricRemoteScriptAction(self.action_name,
149 str(self.action_execution_id),
150 script_local_path_abs,
151 self.libs_dir_path,
152 named_args=named_args,
153 positional_args=pos_args,
154 env_vars=env_vars,
155 on_behalf_user=self._on_behalf_user,
156 user=self._user,
157 remote_dir=remote_dir,
158 hosts=self._hosts,
159 parallel=self._parallel,
160 sudo=self._sudo,
161 timeout=self._timeout)
162
163 def _get_env_vars(self):
164 return {'st2_auth_token': self.auth_token.token} if self.auth_token else {}
165
166 @staticmethod
167 def _get_result_status(result, allow_partial_failure):
168 success = not allow_partial_failure
169 for r in six.itervalues(result):
170 if allow_partial_failure:
171 success |= r.get('succeeded', False)
172 if success:
173 return ACTIONEXEC_STATUS_SUCCEEDED
174 else:
175 success &= r.get('succeeded', False)
176 if not success:
177 return ACTIONEXEC_STATUS_FAILED
178 return ACTIONEXEC_STATUS_SUCCEEDED if success else ACTIONEXEC_STATUS_FAILED
179
180
181 # XXX: Write proper tests.
182 if __name__ == '__main__':
183
184 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
185 print('!!!!!!!!!!!!!!!!!!!!! NORMAL CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')
186 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
187 runner = FabricRunner(str(uuid.uuid4()))
188 remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',
189 'narcissist', 'stanley', hosts=['54.191.85.86',
190 '54.191.17.38', '54.200.102.55'])
191 print(str(remote_action))
192 results = runner._run(remote_action)
193
194 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
195 print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
196 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
197
198 print(results)
199
200 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
201 print('!!!!!!!!!!!!!!!!!!!!! SUDO CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')
202 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
203 runner = FabricRunner(str(uuid.uuid4()))
204 remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',
205 'narcissist', 'stanley', hosts=['54.191.85.86',
206 '54.191.17.38', '54.200.102.55'], parallel=True, sudo=True)
207 results = runner._run(remote_action)
208
209 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
210 print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
211 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
212
213 print(results)
214
215 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
216 print('!!!!!!!!!!!!!!!!!!!!! SCRIPT DAWG !!!!!!!!!!!!!!!!!!!!!!!!!!!')
217 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
218 script_action = FabricRemoteScriptAction('UNAME', 'action_exec_id' + str(uuid.uuid4()),
219 '/tmp/ls-script.sh', named_args={},
220 positional_args='/tmp', on_behalf_user='narcissist',
221 user='stanley', hosts=['54.191.85.86'],
222 parallel=True, sudo=False)
223 results = runner._run(script_action)
224
225 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
226 print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
227 print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
228
229 print(results)
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/st2actions/st2actions/runners/fabricrunner.py b/st2actions/st2actions/runners/fabricrunner.py
--- a/st2actions/st2actions/runners/fabricrunner.py
+++ b/st2actions/st2actions/runners/fabricrunner.py
@@ -117,7 +117,7 @@
self.runner_id, remote_action.get_on_behalf_user())
LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '
'actual user: %s, sudo: %s'),
- remote_action.name, remote_action.id, remote_action.get_command(),
+ remote_action.name, remote_action.action_exec_id, remote_action.get_command(),
remote_action.get_on_behalf_user(), remote_action.get_user(),
remote_action.is_sudo())
results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)
|
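For context, the failure above is a plain attribute error: the log call reads `remote_action.id`, but the patch shows the object exposes the execution id as `action_exec_id` (the real class lives in `st2common.models.system.action`). A minimal sketch of that mismatch, using a hypothetical stand-in class rather than the actual `FabricRemoteAction`:

```python
# Hypothetical stand-in for FabricRemoteAction; it only models the attribute
# names relevant to the bug, not the real st2 class.
class FakeRemoteAction:
    def __init__(self, name, action_exec_id):
        self.name = name
        self.action_exec_id = action_exec_id  # note: no `id` attribute is ever set


action = FakeRemoteAction('UNAME', 'action_exec_id-1234')

try:
    print(action.id)  # mirrors the buggy log argument: raises AttributeError
except AttributeError as err:
    print('buggy access failed:', err)

print('patched access:', action.action_exec_id)  # what the fixed log line uses
```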
{"golden_diff": "diff --git a/st2actions/st2actions/runners/fabricrunner.py b/st2actions/st2actions/runners/fabricrunner.py\n--- a/st2actions/st2actions/runners/fabricrunner.py\n+++ b/st2actions/st2actions/runners/fabricrunner.py\n@@ -117,7 +117,7 @@\n self.runner_id, remote_action.get_on_behalf_user())\n LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '\n 'actual user: %s, sudo: %s'),\n- remote_action.name, remote_action.id, remote_action.get_command(),\n+ remote_action.name, remote_action.action_exec_id, remote_action.get_command(),\n remote_action.get_on_behalf_user(), remote_action.get_user(),\n remote_action.is_sudo())\n results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)\n", "issue": "BUG: \"'FabricRemoteAction' object has no attribute 'id'\"\nFabric based actions are failing due to an incorrectly named attribute.\n\nroot@st2stage201:/tmp# st2 execution get 5493450cce36d2111eb26c79\nSTATUS: failed\nRESULT: \n{\n \"message\": \"'FabricRemoteAction' object has no attribute 'id'\", \n \"traceback\": \" File \"/usr/lib/python2.7/dist-packages/st2actions/container/base.py\", line 117, in _do_run\n run_result = runner.run(action_params)\n File \"/usr/lib/python2.7/dist-packages/st2actions/runners/fabricrunner.py\", line 106, in run\n result = self._run(remote_action)\n File \"/usr/lib/python2.7/dist-packages/st2actions/runners/fabricrunner.py\", line 120, in _run\n remote_action.name, remote_action.id, remote_action.get_command(),\n\"\n}\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\n\nfrom fabric.api import (env, execute)\nfrom oslo.config import cfg\nimport six\n\nfrom st2actions.runners import ActionRunner\nfrom st2actions.runners import ShellRunnerMixin\nfrom st2common import log as logging\nfrom st2common.exceptions.actionrunner import ActionRunnerPreRunError\nfrom st2common.exceptions.fabricrunner import FabricExecutionFailureException\nfrom st2common.constants.action import ACTIONEXEC_STATUS_SUCCEEDED, ACTIONEXEC_STATUS_FAILED\nfrom st2common.models.system.action import (FabricRemoteAction, FabricRemoteScriptAction)\n\n# Replace with container call to get logger.\nLOG = logging.getLogger(__name__)\n\nDEFAULT_ACTION_TIMEOUT = 60\n\n\n# Fabric environment level settings.\n# XXX: Note fabric env is a global singleton.\nenv.parallel = True # By default, execute things in parallel. 
Uses multiprocessing under the hood.\nenv.user = cfg.CONF.system_user.user\nssh_key_file = cfg.CONF.system_user.ssh_key_file\n\nif ssh_key_file:\n ssh_key_file = os.path.expanduser(ssh_key_file)\n\nif ssh_key_file and os.path.exists(ssh_key_file):\n env.key_filename = ssh_key_file\n\nenv.timeout = 10 # Timeout for connections (in seconds)\nenv.command_timeout = DEFAULT_ACTION_TIMEOUT # timeout for commands (in seconds)\nenv.combine_stderr = False\nenv.group = 'staff'\nenv.abort_exception = FabricExecutionFailureException\n\n# constants to lookup in runner_parameters.\nRUNNER_HOSTS = 'hosts'\nRUNNER_PARALLEL = 'parallel'\nRUNNER_SUDO = 'sudo'\nRUNNER_ON_BEHALF_USER = 'user'\nRUNNER_REMOTE_DIR = 'dir'\nRUNNER_COMMAND = 'cmd'\nRUNNER_KWARG_OP = 'kwarg_op'\nRUNNER_TIMEOUT = 'timeout'\n\n\ndef get_runner():\n return FabricRunner(str(uuid.uuid4()))\n\n\nclass FabricRunner(ActionRunner, ShellRunnerMixin):\n def __init__(self, runner_id):\n super(FabricRunner, self).__init__(runner_id=runner_id)\n self._hosts = None\n self._parallel = True\n self._sudo = False\n self._on_behalf_user = None\n self._user = None\n self._kwarg_op = '--'\n\n def pre_run(self):\n LOG.debug('Entering FabricRunner.pre_run() for actionexec_id=\"%s\"',\n self.action_execution_id)\n LOG.debug(' runner_parameters = %s', self.runner_parameters)\n hosts = self.runner_parameters.get(RUNNER_HOSTS, '').split(',')\n self._hosts = [h.strip() for h in hosts if len(h) > 0]\n if len(self._hosts) < 1:\n raise ActionRunnerPreRunError('No hosts specified to run action for action %s.',\n self.action_execution_id)\n self._parallel = self.runner_parameters.get(RUNNER_PARALLEL, True)\n self._sudo = self.runner_parameters.get(RUNNER_SUDO, False)\n self._sudo = self._sudo if self._sudo else False\n self._on_behalf_user = self.context.get(RUNNER_ON_BEHALF_USER, env.user)\n self._user = cfg.CONF.system_user.user\n self._kwarg_op = self.runner_parameters.get(RUNNER_KWARG_OP, '--')\n self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT, DEFAULT_ACTION_TIMEOUT)\n\n LOG.info('[FabricRunner=\"%s\", actionexec_id=\"%s\"] Finished pre_run.',\n self.runner_id, self.action_execution_id)\n\n def run(self, action_parameters):\n LOG.debug(' action_parameters = %s', action_parameters)\n remote_action = self._get_fabric_remote_action(action_parameters) \\\n if self.entry_point is None or len(self.entry_point) < 1 \\\n else self._get_fabric_remote_script_action(action_parameters)\n LOG.debug('Will execute remote_action : %s.', str(remote_action))\n result = self._run(remote_action)\n LOG.debug('Executed remote_action : %s. 
Result is : %s.', remote_action, result)\n self.container_service.report_status(FabricRunner._get_result_status(\n result, cfg.CONF.ssh_runner.allow_partial_failure))\n self.container_service.report_result(result)\n\n # TODO (manas) : figure out the right boolean representation.\n return result is not None\n\n def _run(self, remote_action):\n LOG.info('Executing action via FabricRunner :%s for user: %s.',\n self.runner_id, remote_action.get_on_behalf_user())\n LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '\n 'actual user: %s, sudo: %s'),\n remote_action.name, remote_action.id, remote_action.get_command(),\n remote_action.get_on_behalf_user(), remote_action.get_user(),\n remote_action.is_sudo())\n results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)\n return results\n\n def _get_fabric_remote_action(self, action_paramaters):\n command = self.runner_parameters.get(RUNNER_COMMAND, None)\n env_vars = self._get_env_vars()\n return FabricRemoteAction(self.action_name,\n str(self.action_execution_id),\n command,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._user,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout)\n\n def _get_fabric_remote_script_action(self, action_parameters):\n script_local_path_abs = self.entry_point\n pos_args, named_args = self._get_script_args(action_parameters)\n named_args = self._transform_named_args(named_args)\n env_vars = self._get_env_vars()\n remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,\n cfg.CONF.ssh_runner.remote_dir)\n remote_dir = os.path.join(remote_dir, self.action_execution_id)\n return FabricRemoteScriptAction(self.action_name,\n str(self.action_execution_id),\n script_local_path_abs,\n self.libs_dir_path,\n named_args=named_args,\n positional_args=pos_args,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._user,\n remote_dir=remote_dir,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout)\n\n def _get_env_vars(self):\n return {'st2_auth_token': self.auth_token.token} if self.auth_token else {}\n\n @staticmethod\n def _get_result_status(result, allow_partial_failure):\n success = not allow_partial_failure\n for r in six.itervalues(result):\n if allow_partial_failure:\n success |= r.get('succeeded', False)\n if success:\n return ACTIONEXEC_STATUS_SUCCEEDED\n else:\n success &= r.get('succeeded', False)\n if not success:\n return ACTIONEXEC_STATUS_FAILED\n return ACTIONEXEC_STATUS_SUCCEEDED if success else ACTIONEXEC_STATUS_FAILED\n\n\n# XXX: Write proper tests.\nif __name__ == '__main__':\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! NORMAL CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n runner = FabricRunner(str(uuid.uuid4()))\n remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',\n 'narcissist', 'stanley', hosts=['54.191.85.86',\n '54.191.17.38', '54.200.102.55'])\n print(str(remote_action))\n results = runner._run(remote_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! 
SUDO CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n runner = FabricRunner(str(uuid.uuid4()))\n remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',\n 'narcissist', 'stanley', hosts=['54.191.85.86',\n '54.191.17.38', '54.200.102.55'], parallel=True, sudo=True)\n results = runner._run(remote_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! SCRIPT DAWG !!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n script_action = FabricRemoteScriptAction('UNAME', 'action_exec_id' + str(uuid.uuid4()),\n '/tmp/ls-script.sh', named_args={},\n positional_args='/tmp', on_behalf_user='narcissist',\n user='stanley', hosts=['54.191.85.86'],\n parallel=True, sudo=False)\n results = runner._run(script_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n", "path": "st2actions/st2actions/runners/fabricrunner.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\n\nfrom fabric.api import (env, execute)\nfrom oslo.config import cfg\nimport six\n\nfrom st2actions.runners import ActionRunner\nfrom st2actions.runners import ShellRunnerMixin\nfrom st2common import log as logging\nfrom st2common.exceptions.actionrunner import ActionRunnerPreRunError\nfrom st2common.exceptions.fabricrunner import FabricExecutionFailureException\nfrom st2common.constants.action import ACTIONEXEC_STATUS_SUCCEEDED, ACTIONEXEC_STATUS_FAILED\nfrom st2common.models.system.action import (FabricRemoteAction, FabricRemoteScriptAction)\n\n# Replace with container call to get logger.\nLOG = logging.getLogger(__name__)\n\nDEFAULT_ACTION_TIMEOUT = 60\n\n\n# Fabric environment level settings.\n# XXX: Note fabric env is a global singleton.\nenv.parallel = True # By default, execute things in parallel. 
Uses multiprocessing under the hood.\nenv.user = cfg.CONF.system_user.user\nssh_key_file = cfg.CONF.system_user.ssh_key_file\n\nif ssh_key_file:\n ssh_key_file = os.path.expanduser(ssh_key_file)\n\nif ssh_key_file and os.path.exists(ssh_key_file):\n env.key_filename = ssh_key_file\n\nenv.timeout = 10 # Timeout for connections (in seconds)\nenv.command_timeout = DEFAULT_ACTION_TIMEOUT # timeout for commands (in seconds)\nenv.combine_stderr = False\nenv.group = 'staff'\nenv.abort_exception = FabricExecutionFailureException\n\n# constants to lookup in runner_parameters.\nRUNNER_HOSTS = 'hosts'\nRUNNER_PARALLEL = 'parallel'\nRUNNER_SUDO = 'sudo'\nRUNNER_ON_BEHALF_USER = 'user'\nRUNNER_REMOTE_DIR = 'dir'\nRUNNER_COMMAND = 'cmd'\nRUNNER_KWARG_OP = 'kwarg_op'\nRUNNER_TIMEOUT = 'timeout'\n\n\ndef get_runner():\n return FabricRunner(str(uuid.uuid4()))\n\n\nclass FabricRunner(ActionRunner, ShellRunnerMixin):\n def __init__(self, runner_id):\n super(FabricRunner, self).__init__(runner_id=runner_id)\n self._hosts = None\n self._parallel = True\n self._sudo = False\n self._on_behalf_user = None\n self._user = None\n self._kwarg_op = '--'\n\n def pre_run(self):\n LOG.debug('Entering FabricRunner.pre_run() for actionexec_id=\"%s\"',\n self.action_execution_id)\n LOG.debug(' runner_parameters = %s', self.runner_parameters)\n hosts = self.runner_parameters.get(RUNNER_HOSTS, '').split(',')\n self._hosts = [h.strip() for h in hosts if len(h) > 0]\n if len(self._hosts) < 1:\n raise ActionRunnerPreRunError('No hosts specified to run action for action %s.',\n self.action_execution_id)\n self._parallel = self.runner_parameters.get(RUNNER_PARALLEL, True)\n self._sudo = self.runner_parameters.get(RUNNER_SUDO, False)\n self._sudo = self._sudo if self._sudo else False\n self._on_behalf_user = self.context.get(RUNNER_ON_BEHALF_USER, env.user)\n self._user = cfg.CONF.system_user.user\n self._kwarg_op = self.runner_parameters.get(RUNNER_KWARG_OP, '--')\n self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT, DEFAULT_ACTION_TIMEOUT)\n\n LOG.info('[FabricRunner=\"%s\", actionexec_id=\"%s\"] Finished pre_run.',\n self.runner_id, self.action_execution_id)\n\n def run(self, action_parameters):\n LOG.debug(' action_parameters = %s', action_parameters)\n remote_action = self._get_fabric_remote_action(action_parameters) \\\n if self.entry_point is None or len(self.entry_point) < 1 \\\n else self._get_fabric_remote_script_action(action_parameters)\n LOG.debug('Will execute remote_action : %s.', str(remote_action))\n result = self._run(remote_action)\n LOG.debug('Executed remote_action : %s. 
Result is : %s.', remote_action, result)\n self.container_service.report_status(FabricRunner._get_result_status(\n result, cfg.CONF.ssh_runner.allow_partial_failure))\n self.container_service.report_result(result)\n\n # TODO (manas) : figure out the right boolean representation.\n return result is not None\n\n def _run(self, remote_action):\n LOG.info('Executing action via FabricRunner :%s for user: %s.',\n self.runner_id, remote_action.get_on_behalf_user())\n LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '\n 'actual user: %s, sudo: %s'),\n remote_action.name, remote_action.action_exec_id, remote_action.get_command(),\n remote_action.get_on_behalf_user(), remote_action.get_user(),\n remote_action.is_sudo())\n results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)\n return results\n\n def _get_fabric_remote_action(self, action_paramaters):\n command = self.runner_parameters.get(RUNNER_COMMAND, None)\n env_vars = self._get_env_vars()\n return FabricRemoteAction(self.action_name,\n str(self.action_execution_id),\n command,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._user,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout)\n\n def _get_fabric_remote_script_action(self, action_parameters):\n script_local_path_abs = self.entry_point\n pos_args, named_args = self._get_script_args(action_parameters)\n named_args = self._transform_named_args(named_args)\n env_vars = self._get_env_vars()\n remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,\n cfg.CONF.ssh_runner.remote_dir)\n remote_dir = os.path.join(remote_dir, self.action_execution_id)\n return FabricRemoteScriptAction(self.action_name,\n str(self.action_execution_id),\n script_local_path_abs,\n self.libs_dir_path,\n named_args=named_args,\n positional_args=pos_args,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._user,\n remote_dir=remote_dir,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout)\n\n def _get_env_vars(self):\n return {'st2_auth_token': self.auth_token.token} if self.auth_token else {}\n\n @staticmethod\n def _get_result_status(result, allow_partial_failure):\n success = not allow_partial_failure\n for r in six.itervalues(result):\n if allow_partial_failure:\n success |= r.get('succeeded', False)\n if success:\n return ACTIONEXEC_STATUS_SUCCEEDED\n else:\n success &= r.get('succeeded', False)\n if not success:\n return ACTIONEXEC_STATUS_FAILED\n return ACTIONEXEC_STATUS_SUCCEEDED if success else ACTIONEXEC_STATUS_FAILED\n\n\n# XXX: Write proper tests.\nif __name__ == '__main__':\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! NORMAL CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n runner = FabricRunner(str(uuid.uuid4()))\n remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',\n 'narcissist', 'stanley', hosts=['54.191.85.86',\n '54.191.17.38', '54.200.102.55'])\n print(str(remote_action))\n results = runner._run(remote_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! 
SUDO CMD !!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n runner = FabricRunner(str(uuid.uuid4()))\n remote_action = FabricRemoteAction('UNAME', 'action_exec_id' + str(uuid.uuid4()), 'uname -a',\n 'narcissist', 'stanley', hosts=['54.191.85.86',\n '54.191.17.38', '54.200.102.55'], parallel=True, sudo=True)\n results = runner._run(remote_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! SCRIPT DAWG !!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n script_action = FabricRemoteScriptAction('UNAME', 'action_exec_id' + str(uuid.uuid4()),\n '/tmp/ls-script.sh', named_args={},\n positional_args='/tmp', on_behalf_user='narcissist',\n user='stanley', hosts=['54.191.85.86'],\n parallel=True, sudo=False)\n results = runner._run(script_action)\n\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!! RESULTS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n print(results)\n", "path": "st2actions/st2actions/runners/fabricrunner.py"}]}
| 3,313 | 202 |
gh_patches_debug_24192
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-3229
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KServe lgbserver runtime error input for v1 endpoint
/kind bug
**What steps did you take and what happened:**
lgbserver used to support both of the following input formats prior to the 0.11 release.
```python
request = {'inputs' : [{'sepal_width_(cm)': {0: 3.5},
'petal_length_(cm)': {0: 1.4},
'petal_width_(cm)': {0: 0.2},
'sepal_length_(cm)': {0: 5.1} }]}
```
```python
request2 = {'inputs': [
[{'sepal_width_(cm)': 3.5},
{'petal_length_(cm)': 1.4},
{'petal_width_(cm)': 0.2},
{'sepal_length_(cm)': 5.1}]
] }
```
KServe only documented the first input format (https://kserve.github.io/website/0.11/modelserving/v1beta1/lightgbm),
and in 0.11 the second input format stopped working with the following error:
```bash
2023-11-03 09:06:02.099 32367 kserve ERROR [inference_error_handler():89] Exception:
Traceback (most recent call last):
File "/Users/dsun20/kserve/python/lgbserver/lgbserver/model.py", line 62, in predict
result = self._booster.predict(instances)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 4220, in predict
return predictor.predict(
^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1047, in predict
preds, nrow = self.__pred_for_np2d(
^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1187, in __pred_for_np2d
return self.__inner_predict_np2d(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1127, in __inner_predict_np2d
data = np.array(mat.reshape(mat.size), dtype=np.float32)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: float() argument must be a string or a real number, not 'dict'
```
**What did you expect to happen:**
**What's the InferenceService yaml:**
[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KServe Version:
- Kubeflow version:
- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube/Kind version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/kserve/utils/utils.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import sys
17 import uuid
18 from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter
19 from typing import Dict, Union
20
21 from kserve.utils.numpy_codec import from_np_dtype
22 import pandas as pd
23 import numpy as np
24 import psutil
25 from cloudevents.conversion import to_binary, to_structured
26 from cloudevents.http import CloudEvent
27 from grpc import ServicerContext
28 from kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse
29
30
31 def is_running_in_k8s():
32 return os.path.isdir('/var/run/secrets/kubernetes.io/')
33
34
35 def get_current_k8s_namespace():
36 with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:
37 return f.readline()
38
39
40 def get_default_target_namespace():
41 if not is_running_in_k8s():
42 return 'default'
43 return get_current_k8s_namespace()
44
45
46 def get_isvc_namespace(inferenceservice):
47 return inferenceservice.metadata.namespace or get_default_target_namespace()
48
49
50 def get_ig_namespace(inferencegraph):
51 return inferencegraph.metadata.namespace or get_default_target_namespace()
52
53
54 def cpu_count():
55 """Get the available CPU count for this system.
56 Takes the minimum value from the following locations:
57 - Total system cpus available on the host.
58 - CPU Affinity (if set)
59 - Cgroups limit (if set)
60 """
61 count = os.cpu_count()
62
63 # Check CPU affinity if available
64 try:
65 affinity_count = len(psutil.Process().cpu_affinity())
66 if affinity_count > 0:
67 count = min(count, affinity_count)
68 except Exception:
69 pass
70
71 # Check cgroups if available
72 if sys.platform == "linux":
73 try:
74 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us") as f:
75 quota = int(f.read())
76 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us") as f:
77 period = int(f.read())
78 cgroups_count = int(quota / period)
79 if cgroups_count > 0:
80 count = min(count, cgroups_count)
81 except Exception:
82 pass
83
84 return count
85
86
87 def is_structured_cloudevent(body: Dict) -> bool:
88 """Returns True if the JSON request body resembles a structured CloudEvent"""
89 return "time" in body \
90 and "type" in body \
91 and "source" in body \
92 and "id" in body \
93 and "specversion" in body \
94 and "data" in body
95
96
97 def create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,
98 binary_event=False) -> tuple:
99 ce_attributes = {}
100
101 if os.getenv("CE_MERGE", "false").lower() == "true":
102 if binary_event:
103 ce_attributes = req_attributes
104 if "datacontenttype" in ce_attributes: # Optional field so must check
105 del ce_attributes["datacontenttype"]
106 else:
107 ce_attributes = req_attributes
108
109 # Remove these fields so we generate new ones
110 del ce_attributes["id"]
111 del ce_attributes["time"]
112
113 ce_attributes["type"] = os.getenv("CE_TYPE", "io.kserve.inference.response")
114 ce_attributes["source"] = os.getenv("CE_SOURCE", f"io.kserve.inference.{model_name}")
115
116 event = CloudEvent(ce_attributes, response)
117
118 if binary_event:
119 event_headers, event_body = to_binary(event)
120 else:
121 event_headers, event_body = to_structured(event)
122
123 return event_headers, event_body
124
125
126 def generate_uuid() -> str:
127 return str(uuid.uuid4())
128
129
130 def to_headers(context: ServicerContext) -> Dict[str, str]:
131 metadata = context.invocation_metadata()
132 if hasattr(context, "trailing_metadata"):
133 metadata += context.trailing_metadata()
134 headers = {}
135 for metadatum in metadata:
136 headers[metadatum.key] = metadatum.value
137
138 return headers
139
140
141 def get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:
142 if isinstance(payload, Dict):
143 instances = payload["inputs"] if "inputs" in payload else payload["instances"]
144 if len(instances) == 0:
145 return np.array(instances)
146 if isinstance(instances[0], Dict):
147 dfs = []
148 for input in instances:
149 dfs.append(pd.DataFrame(input))
150 inputs = pd.concat(dfs, axis=0)
151 return inputs
152 else:
153 return np.array(instances)
154
155 elif isinstance(payload, InferRequest):
156 content_type = ''
157 parameters = payload.parameters
158 if parameters:
159 if isinstance(parameters.get("content_type"), InferParameter):
160 # for v2 grpc, we get InferParameter obj eg: {"content_type": string_param: "pd"}
161 content_type = str(parameters.get("content_type").string_param)
162 else:
163 # for v2 http, we get string eg: {"content_type": "pd"}
164 content_type = parameters.get("content_type")
165
166 if content_type == "pd":
167 return payload.as_dataframe()
168 else:
169 input = payload.inputs[0]
170 return input.as_numpy()
171
172
173 def get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],
174 model_name: str) -> Union[Dict, InferResponse]:
175 if isinstance(payload, Dict):
176 infer_outputs = result
177 if isinstance(result, pd.DataFrame):
178 infer_outputs = []
179 for label, row in result.iterrows():
180 infer_outputs.append(row.to_dict())
181 elif isinstance(result, np.ndarray):
182 infer_outputs = result.tolist()
183 return {"predictions": infer_outputs}
184 elif isinstance(payload, InferRequest):
185 infer_outputs = []
186 if isinstance(result, pd.DataFrame):
187 for col in result.columns:
188 infer_output = InferOutput(
189 name=col,
190 shape=list(result[col].shape),
191 datatype=from_np_dtype(result[col].dtype),
192 data=result[col].tolist()
193 )
194 infer_outputs.append(infer_output)
195 else:
196 infer_output = InferOutput(
197 name="output-0",
198 shape=list(result.shape),
199 datatype=from_np_dtype(result.dtype),
200 data=result.flatten().tolist()
201 )
202 infer_outputs.append(infer_output)
203 return InferResponse(
204 model_name=model_name,
205 infer_outputs=infer_outputs,
206 response_id=payload.id if payload.id else generate_uuid()
207 )
208
209
210 def strtobool(val: str) -> bool:
211 """Convert a string representation of truth to True or False.
212
213 True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
214 are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
215 'val' is anything else.
216
217 Adapted from deprecated `distutils`
218 https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py
219 """
220 val = val.lower()
221 if val in ('y', 'yes', 't', 'true', 'on', '1'):
222 return True
223 elif val in ('n', 'no', 'f', 'false', 'off', '0'):
224 return False
225 else:
226 raise ValueError("invalid truth value %r" % (val,))
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py
--- a/python/kserve/kserve/utils/utils.py
+++ b/python/kserve/kserve/utils/utils.py
@@ -16,7 +16,7 @@
import sys
import uuid
from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter
-from typing import Dict, Union
+from typing import Dict, Union, List
from kserve.utils.numpy_codec import from_np_dtype
import pandas as pd
@@ -149,6 +149,24 @@
dfs.append(pd.DataFrame(input))
inputs = pd.concat(dfs, axis=0)
return inputs
+
+ # Handles the following input format
+ # {'inputs': [
+ # [{'sepal_width_(cm)': 3.5},
+ # {'petal_length_(cm)': 1.4},
+ # {'petal_width_(cm)': 0.2},
+ # {'sepal_length_(cm)': 5.1}]
+ # ]}
+ elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):
+ data: Dict[str, List] = {}
+ for instance in instances:
+ for item in instance:
+ for key, val in item.items():
+ if key in data:
+ data[key].append(val)
+ else:
+ data[key] = [val]
+ return pd.DataFrame(data)
else:
return np.array(instances)
|
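For reference, a standalone sketch of what the added branch does with the second request format from the issue: each inner list of single-key dicts is flattened into one column-to-values mapping and handed to pandas. This mirrors the patched loop above rather than calling the kserve helper itself:

```python
# Sketch of the new list-of-dicts handling, using the issue's example payload.
from typing import Dict, List

import pandas as pd

instances = [
    [{'sepal_width_(cm)': 3.5},
     {'petal_length_(cm)': 1.4},
     {'petal_width_(cm)': 0.2},
     {'sepal_length_(cm)': 5.1}],
]

data: Dict[str, List] = {}
for instance in instances:        # one inner list per row of input
    for item in instance:         # each item is a single {feature: value} dict
        for key, val in item.items():
            if key in data:
                data[key].append(val)
            else:
                data[key] = [val]

df = pd.DataFrame(data)           # numeric frame the lightgbm booster can consume
print(df)
```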
{"golden_diff": "diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py\n--- a/python/kserve/kserve/utils/utils.py\n+++ b/python/kserve/kserve/utils/utils.py\n@@ -16,7 +16,7 @@\n import sys\n import uuid\n from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\n-from typing import Dict, Union\n+from typing import Dict, Union, List\n \n from kserve.utils.numpy_codec import from_np_dtype\n import pandas as pd\n@@ -149,6 +149,24 @@\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n+\n+ # Handles the following input format\n+ # {'inputs': [\n+ # [{'sepal_width_(cm)': 3.5},\n+ # {'petal_length_(cm)': 1.4},\n+ # {'petal_width_(cm)': 0.2},\n+ # {'sepal_length_(cm)': 5.1}]\n+ # ]}\n+ elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n+ data: Dict[str, List] = {}\n+ for instance in instances:\n+ for item in instance:\n+ for key, val in item.items():\n+ if key in data:\n+ data[key].append(val)\n+ else:\n+ data[key] = [val]\n+ return pd.DataFrame(data)\n else:\n return np.array(instances)\n", "issue": "KServe lgbserver runtime error input for v1 endpoint\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nlgbserver used to support both of following inputs formats prior to 0.11 release.\r\n\r\n```python\r\nrequest = {'inputs' : [{'sepal_width_(cm)': {0: 3.5}, \r\n'petal_length_(cm)': {0: 1.4}, \r\n'petal_width_(cm)': {0: 0.2},\r\n'sepal_length_(cm)': {0: 5.1} }]}\r\n```\r\n\r\n```python\r\nrequest2 = {'inputs': [\r\n[{'sepal_width_(cm)': 3.5}, \r\n {'petal_length_(cm)': 1.4}, \r\n {'petal_width_(cm)': 0.2}, \r\n {'sepal_length_(cm)': 5.1}]\r\n ] }\r\n```\r\n\r\nKServe only documented the first input format https://kserve.github.io/website/0.11/modelserving/v1beta1/lightgbm\r\n\r\nand in 0.11 the second input format stop working with following error\r\n\r\n```bash\r\n2023-11-03 09:06:02.099 32367 kserve ERROR [inference_error_handler():89] Exception:\r\nTraceback (most recent call last):\r\n File \"/Users/dsun20/kserve/python/lgbserver/lgbserver/model.py\", line 62, in predict\r\n result = self._booster.predict(instances)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 4220, in predict\r\n return predictor.predict(\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1047, in predict\r\n preds, nrow = self.__pred_for_np2d(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1187, in __pred_for_np2d\r\n return self.__inner_predict_np2d(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1127, in __inner_predict_np2d\r\n data = np.array(mat.reshape(mat.size), dtype=np.float32)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nTypeError: float() argument must be a string or a real number, not 'dict'\r\n```\r\n\r\n\r\n**What did you expect to happen:**\r\n\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- 
Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if \"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", 
\"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union, List\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if 
\"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", \"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n\n # Handles the following input format\n # {'inputs': [\n # [{'sepal_width_(cm)': 3.5},\n # {'petal_length_(cm)': 1.4},\n # {'petal_width_(cm)': 0.2},\n # {'sepal_length_(cm)': 5.1}]\n # ]}\n elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n data: Dict[str, List] = {}\n for instance in instances:\n for item in instance:\n for key, val in item.items():\n if key in data:\n data[key].append(val)\n else:\n data[key] = [val]\n return pd.DataFrame(data)\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n 
infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}]}
| 3,295 | 345 |