| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.1k-10.2k | stringlengths 151-4.94k | stringlengths 582-21k | int64 271-2.05k | int64 47-1.02k |
gh_patches_debug_31101 | rasdani/github-patches | git_diff | StackStorm__st2-4592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The api key in the st2api log is not obfuscated
##### SUMMARY
The user found the API key in clear text in a query request (made by the load balancer health check):
```GET /api/v1/?st2-api-key=foo HTTP/1.1```
##### ISSUE TYPE
- Bug Report
##### STACKSTORM VERSION
st2 2.10.3, on Python 2.7.12
--- END ISSUE ---
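For context, a minimal reproduction sketch of how the key ends up in the values that get logged. It assumes st2common's `Request` wraps `webob.Request` (which provides `GET.dict_of_lists()`); the URL is taken from the issue, and this is not the actual st2 code path:
```python
from webob import Request

request = Request.blank('/api/v1/?st2-api-key=foo')

values = {
    'method': request.method,
    'path': request.path,
    'query': request.GET.dict_of_lists(),
}
print(values)
# {'method': 'GET', 'path': '/api/v1/', 'query': {'st2-api-key': ['foo']}}
```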
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/st2common/middleware/logging.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17 import time
18 import types
19 import itertools
20
21 from st2common.constants.api import REQUEST_ID_HEADER
22 from st2common import log as logging
23 from st2common.router import Request, NotFoundException
24
25 LOG = logging.getLogger(__name__)
26
27 try:
28 clock = time.perf_counter
29 except AttributeError:
30 clock = time.time
31
32
33 class LoggingMiddleware(object):
34 """
35 Logs all incoming requests and outgoing responses
36 """
37
38 def __init__(self, app, router):
39 self.app = app
40 self.router = router
41
42 def __call__(self, environ, start_response):
43 start_time = clock()
44 status_code = []
45 content_length = []
46
47 request = Request(environ)
48
49 # Log the incoming request
50 values = {
51 'method': request.method,
52 'path': request.path,
53 'remote_addr': request.remote_addr,
54 'query': request.GET.dict_of_lists(),
55 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
56 }
57
58 LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %
59 values, extra=values)
60
61 def custom_start_response(status, headers, exc_info=None):
62 status_code.append(int(status.split(' ')[0]))
63
64 for name, value in headers:
65 if name.lower() == 'content-length':
66 content_length.append(int(value))
67 break
68
69 return start_response(status, headers, exc_info)
70
71 retval = self.app(environ, custom_start_response)
72
73 try:
74 endpoint, path_vars = self.router.match(request)
75 except NotFoundException:
76 endpoint = {}
77
78 log_result = endpoint.get('x-log-result', True)
79
80 if isinstance(retval, (types.GeneratorType, itertools.chain)):
81 # Note: We don't log the result when return value is a generator, because this would
82 # result in calling str() on the generator and as such, exhausting it
83 content_length = [float('inf')]
84 log_result = False
85
86 # Log the response
87 values = {
88 'method': request.method,
89 'path': request.path,
90 'remote_addr': request.remote_addr,
91 'status': status_code[0],
92 'runtime': float("{0:.3f}".format((clock() - start_time) * 10**3)),
93 'content_length': content_length[0] if content_length else len(b''.join(retval)),
94 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
95 }
96
97 log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)
98 LOG.info(log_msg, extra=values)
99
100 if log_result:
101 values['result'] = retval[0]
102 log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\n%(result)s' %
103 (values))
104 LOG.debug(log_msg, extra=values)
105
106 return retval
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
Golden diff:

diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py
--- a/st2common/st2common/middleware/logging.py
+++ b/st2common/st2common/middleware/logging.py
@@ -14,16 +14,28 @@
# limitations under the License.
from __future__ import absolute_import
+
import time
import types
import itertools
+from oslo_config import cfg
+
from st2common.constants.api import REQUEST_ID_HEADER
+from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME
+from st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
+from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
+from st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST
from st2common import log as logging
from st2common.router import Request, NotFoundException
LOG = logging.getLogger(__name__)
+SECRET_QUERY_PARAMS = [
+ QUERY_PARAM_ATTRIBUTE_NAME,
+ QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
+] + MASKED_ATTRIBUTES_BLACKLIST
+
try:
clock = time.perf_counter
except AttributeError:
@@ -46,12 +58,20 @@
request = Request(environ)
+ query_params = request.GET.dict_of_lists()
+
+ # Mask secret / sensitive query params
+ secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist
+ for param_name in secret_query_params:
+ if param_name in query_params:
+ query_params[param_name] = MASKED_ATTRIBUTE_VALUE
+
# Log the incoming request
values = {
'method': request.method,
'path': request.path,
'remote_addr': request.remote_addr,
- 'query': request.GET.dict_of_lists(),
+ 'query': query_params,
'request_id': request.headers.get(REQUEST_ID_HEADER, None)
}
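A self-contained sketch of the masking step the diff adds. Only the control flow mirrors the patch; the constant values below are assumed stand-ins for the real ones in `st2common.constants`:
```python
MASKED_ATTRIBUTE_VALUE = '********'                    # assumed value
SECRET_QUERY_PARAMS = ['x-auth-token', 'st2-api-key']  # assumed contents

def mask_secret_query_params(query_params, extra_blacklist=()):
    # Mask secret / sensitive query params, as the patched middleware does.
    for param_name in list(SECRET_QUERY_PARAMS) + list(extra_blacklist):
        if param_name in query_params:
            query_params[param_name] = MASKED_ATTRIBUTE_VALUE
    return query_params

print(mask_secret_query_params({'st2-api-key': ['foo'], 'limit': ['10']}))
# {'st2-api-key': '********', 'limit': ['10']}
```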
| {"golden_diff": "diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py\n--- a/st2common/st2common/middleware/logging.py\n+++ b/st2common/st2common/middleware/logging.py\n@@ -14,16 +14,28 @@\n # limitations under the License.\n \n from __future__ import absolute_import\n+\n import time\n import types\n import itertools\n \n+from oslo_config import cfg\n+\n from st2common.constants.api import REQUEST_ID_HEADER\n+from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME\n+from st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n+from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE\n+from st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST\n from st2common import log as logging\n from st2common.router import Request, NotFoundException\n \n LOG = logging.getLogger(__name__)\n \n+SECRET_QUERY_PARAMS = [\n+ QUERY_PARAM_ATTRIBUTE_NAME,\n+ QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n+] + MASKED_ATTRIBUTES_BLACKLIST\n+\n try:\n clock = time.perf_counter\n except AttributeError:\n@@ -46,12 +58,20 @@\n \n request = Request(environ)\n \n+ query_params = request.GET.dict_of_lists()\n+\n+ # Mask secret / sensitive query params\n+ secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist\n+ for param_name in secret_query_params:\n+ if param_name in query_params:\n+ query_params[param_name] = MASKED_ATTRIBUTE_VALUE\n+\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n- 'query': request.GET.dict_of_lists(),\n+ 'query': query_params,\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n", "issue": "The api key in the st2api log is not obfuscated\n##### SUMMARY\r\nThe user found in clean API key in query request (for the load balancer health check)\r\n```GET /api/v1/?st2-api-key=foo HTTP/1.1```\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n \r\n##### STACKSTORM VERSION\r\nst2 2.10.3, on Python 2.7.12\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport time\nimport types\nimport itertools\n\nfrom st2common.constants.api import REQUEST_ID_HEADER\nfrom st2common import log as logging\nfrom st2common.router import Request, NotFoundException\n\nLOG = logging.getLogger(__name__)\n\ntry:\n clock = time.perf_counter\nexcept AttributeError:\n clock = time.time\n\n\nclass LoggingMiddleware(object):\n \"\"\"\n Logs all incoming requests and outgoing responses\n \"\"\"\n\n def __init__(self, app, router):\n self.app = app\n self.router = router\n\n def __call__(self, environ, start_response):\n start_time = clock()\n status_code = []\n content_length = []\n\n request = Request(environ)\n\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'query': request.GET.dict_of_lists(),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %\n values, extra=values)\n\n def custom_start_response(status, headers, exc_info=None):\n status_code.append(int(status.split(' ')[0]))\n\n for name, value in headers:\n if name.lower() == 'content-length':\n content_length.append(int(value))\n break\n\n return start_response(status, headers, exc_info)\n\n retval = self.app(environ, custom_start_response)\n\n try:\n endpoint, path_vars = self.router.match(request)\n except NotFoundException:\n endpoint = {}\n\n log_result = endpoint.get('x-log-result', True)\n\n if isinstance(retval, (types.GeneratorType, itertools.chain)):\n # Note: We don't log the result when return value is a generator, because this would\n # result in calling str() on the generator and as such, exhausting it\n content_length = [float('inf')]\n log_result = False\n\n # Log the response\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'status': status_code[0],\n 'runtime': float(\"{0:.3f}\".format((clock() - start_time) * 10**3)),\n 'content_length': content_length[0] if content_length else len(b''.join(retval)),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)\n LOG.info(log_msg, extra=values)\n\n if log_result:\n values['result'] = retval[0]\n log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\\n%(result)s' %\n (values))\n LOG.debug(log_msg, extra=values)\n\n return retval\n", "path": "st2common/st2common/middleware/logging.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport time\nimport types\nimport itertools\n\nfrom oslo_config import cfg\n\nfrom st2common.constants.api import REQUEST_ID_HEADER\nfrom st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME\nfrom st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\nfrom st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE\nfrom st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST\nfrom st2common import log as logging\nfrom st2common.router import Request, NotFoundException\n\nLOG = logging.getLogger(__name__)\n\nSECRET_QUERY_PARAMS = [\n QUERY_PARAM_ATTRIBUTE_NAME,\n QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n] + MASKED_ATTRIBUTES_BLACKLIST\n\ntry:\n clock = time.perf_counter\nexcept AttributeError:\n clock = time.time\n\n\nclass LoggingMiddleware(object):\n \"\"\"\n Logs all incoming requests and outgoing responses\n \"\"\"\n\n def __init__(self, app, router):\n self.app = app\n self.router = router\n\n def __call__(self, environ, start_response):\n start_time = clock()\n status_code = []\n content_length = []\n\n request = Request(environ)\n\n query_params = request.GET.dict_of_lists()\n\n # Mask secret / sensitive query params\n secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist\n for param_name in secret_query_params:\n if param_name in query_params:\n query_params[param_name] = MASKED_ATTRIBUTE_VALUE\n\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'query': query_params,\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %\n values, extra=values)\n\n def custom_start_response(status, headers, exc_info=None):\n status_code.append(int(status.split(' ')[0]))\n\n for name, value in headers:\n if name.lower() == 'content-length':\n content_length.append(int(value))\n break\n\n return start_response(status, headers, exc_info)\n\n retval = self.app(environ, custom_start_response)\n\n try:\n endpoint, path_vars = self.router.match(request)\n except NotFoundException:\n endpoint = {}\n\n log_result = endpoint.get('x-log-result', True)\n\n if isinstance(retval, (types.GeneratorType, itertools.chain)):\n # Note: We don't log the result when return value is a generator, because this would\n # result in calling str() on the generator and as such, exhausting it\n content_length = [float('inf')]\n log_result = False\n\n # Log the response\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'status': status_code[0],\n 'runtime': float(\"{0:.3f}\".format((clock() - start_time) * 10**3)),\n 'content_length': content_length[0] if content_length else len(b''.join(retval)),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)\n LOG.info(log_msg, extra=values)\n\n if log_result:\n values['result'] = retval[0]\n log_msg = ('%(request_id)s - %(status)s %(content_length)s 
%(runtime)sms\\n%(result)s' %\n (values))\n LOG.debug(log_msg, extra=values)\n\n return retval\n", "path": "st2common/st2common/middleware/logging.py"}]} | 1,376 | 406 |
gh_patches_debug_14129 | rasdani/github-patches | git_diff | freedomofpress__securedrop-237 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible path confusion / traversal via imprecise store.verify()
The method `store.verify()` checks file paths provided via URL and other means and raises an exception if they do not match the validation criteria.
A problem with this validation process was spotted: `os.path.commonprefix()` is not sufficient to check whether the path is inside the configured store path. It only compares character by character, and therefore allows navigating into another folder whose name shares the same starting string.
```
Example: config.STORE_DIR = '/opt/store'
PoC: store.verify('/opt/store_backup')
```
The mitigation has to make sure that the path is inside the configured store folder. One option is to add another check in `store.verify()` using `os.path.relpath(p, config.STORE_DIR)`: if the absolute path `p` is not inside the store directory, `os.path.relpath()` returns a string starting with `'../'`.
Example:
```
os.path.relpath('/opt/store_backup', config.STORE_DIR) == '../store_backup'
```
**Reported as part of the cure53 audit of 0.2 as: SD-01-006**
--- END ISSUE ---
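The sibling-directory case from the issue, demonstrated with the standard library only (store path taken from the issue example):
```python
import os.path

STORE_DIR = '/opt/store'
p = '/opt/store_backup'

# commonprefix compares character by character, so the old check passes:
print(os.path.commonprefix([STORE_DIR, p]))  # '/opt/store'

# relpath works per path component and exposes the escape:
print(os.path.relpath(p, STORE_DIR))         # '../store_backup'
```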
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/store.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os
3 import re
4 import config
5 import zipfile
6 import crypto_util
7 import uuid
8 import tempfile
9
10 VALIDATE_FILENAME = re.compile(
11 "^(reply-)?[a-f0-9-]+(_msg|_doc\.zip|)\.gpg$").match
12
13
14 class PathException(Exception):
15
16 '''An exception raised by `store.verify` when it encounters a bad path. A path
17 can be bad when it is not absolute, not normalized, not within
18 `config.STORE_DIR`, or doesn't match the filename format.
19 '''
20 pass
21
22
23 def verify(p):
24 '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
25 matches the filename format.
26 '''
27 if not os.path.isabs(config.STORE_DIR):
28 raise PathException("config.STORE_DIR(%s) is not absolute" % (
29 config.STORE_DIR, ))
30
31 # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to
32 # '/bar', etc. We have to check that the path is normalized before checking
33 # that it starts with the `config.STORE_DIR` or else a malicious actor could
34 # append a bunch of '../../..' to access files outside of the store.
35 if not p == os.path.abspath(p):
36 raise PathException("The path is not absolute and/or normalized")
37
38 if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
39 raise PathException("Invalid directory %s" % (p, ))
40
41 filename = os.path.basename(p)
42 ext = os.path.splitext(filename)[-1]
43
44 if os.path.isfile(p):
45 if filename == '_FLAG':
46 return True
47 if ext != '.gpg':
48 # if there's an extension, verify it's a GPG
49 raise PathException("Invalid file extension %s" % (ext, ))
50 if not VALIDATE_FILENAME(filename):
51 raise PathException("Invalid filename %s" % (filename, ))
52
53
54 def path(*s):
55 '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''
56 joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)
57 absolute = os.path.abspath(joined)
58 verify(absolute)
59 return absolute
60
61
62 def get_bulk_archive(filenames):
63 zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')
64 with zipfile.ZipFile(zip_file, 'w') as zip:
65 for filename in filenames:
66 verify(filename)
67 zip.write(filename, arcname=os.path.basename(filename))
68 return zip_file
69
70
71 def log(msg):
72 file(path('NOTES'), 'a').write(msg)
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
Golden diff:

diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -35,13 +35,13 @@
if not p == os.path.abspath(p):
raise PathException("The path is not absolute and/or normalized")
- if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
+ # Check that the path p is in config.STORE_DIR
+ if os.path.relpath(p, config.STORE_DIR).startswith('..'):
raise PathException("Invalid directory %s" % (p, ))
- filename = os.path.basename(p)
- ext = os.path.splitext(filename)[-1]
-
if os.path.isfile(p):
+ filename = os.path.basename(p)
+ ext = os.path.splitext(filename)[-1]
if filename == '_FLAG':
return True
if ext != '.gpg':
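The patched containment test, extracted into a standalone helper for illustration; `store_dir` stands in for `config.STORE_DIR`, and the semantics are assumed to match the diff:
```python
import os.path

def is_inside_store(p, store_dir):
    # A path outside store_dir yields a relative path starting with '..'.
    return not os.path.relpath(p, store_dir).startswith('..')

assert is_inside_store('/opt/store/abc/file_msg.gpg', '/opt/store')
assert not is_inside_store('/opt/store_backup', '/opt/store')
assert not is_inside_store('/opt/store/../etc/passwd', '/opt/store')
```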
| {"golden_diff": "diff --git a/securedrop/store.py b/securedrop/store.py\n--- a/securedrop/store.py\n+++ b/securedrop/store.py\n@@ -35,13 +35,13 @@\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n \n- if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:\n+ # Check that the path p is in config.STORE_DIR\n+ if os.path.relpath(p, config.STORE_DIR).startswith('..'):\n raise PathException(\"Invalid directory %s\" % (p, ))\n \n- filename = os.path.basename(p)\n- ext = os.path.splitext(filename)[-1]\n-\n if os.path.isfile(p):\n+ filename = os.path.basename(p)\n+ ext = os.path.splitext(filename)[-1]\n if filename == '_FLAG':\n return True\n if ext != '.gpg':\n", "issue": "Possible path confusion / traversal via imprecise store.verify()\nThe method `store.verify()` checks file paths provided via URL and other ways and raises an exception if they are not matching the validation criteria.\n\nA problem with this validation process was spotted: `os.path.commonprefix()` is not sufficient to check if the path is inside the configured store path. It only compares character by character. Thus allows to navigate into another folder when they share the same start string.\n\n```\nExample: config.STORE_DIR = '/opt/store'\nPoC: store.verify('/opt/store_backup')\n```\n\nMitigation has to make sure, that the path is inside the configured store folder. A mitigation could be to add another check in `store.verify()` with `os.path.relpath(p, config.STORE_DIR)`. If the absolute path p is not inside the store directory, `os.path.relpath()` will return a string starting with '../'.\n\nExample:\n\n```\nos.path.relpath('/opt/store_backup', config.STORE_DIR) == '../store_backup'\n```\n\n**Reported as part of the cure53 audit of 0.2 as: SD-01-006**\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport config\nimport zipfile\nimport crypto_util\nimport uuid\nimport tempfile\n\nVALIDATE_FILENAME = re.compile(\n \"^(reply-)?[a-f0-9-]+(_msg|_doc\\.zip|)\\.gpg$\").match\n\n\nclass PathException(Exception):\n\n '''An exception raised by `store.verify` when it encounters a bad path. A path\n can be bad when it is not absolute, not normalized, not within\n `config.STORE_DIR`, or doesn't match the filename format.\n '''\n pass\n\n\ndef verify(p):\n '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and\n matches the filename format.\n '''\n if not os.path.isabs(config.STORE_DIR):\n raise PathException(\"config.STORE_DIR(%s) is not absolute\" % (\n config.STORE_DIR, ))\n\n # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to\n # '/bar', etc. We have to check that the path is normalized before checking\n # that it starts with the `config.STORE_DIR` or else a malicious actor could\n # append a bunch of '../../..' 
to access files outside of the store.\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n\n if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:\n raise PathException(\"Invalid directory %s\" % (p, ))\n\n filename = os.path.basename(p)\n ext = os.path.splitext(filename)[-1]\n\n if os.path.isfile(p):\n if filename == '_FLAG':\n return True\n if ext != '.gpg':\n # if there's an extension, verify it's a GPG\n raise PathException(\"Invalid file extension %s\" % (ext, ))\n if not VALIDATE_FILENAME(filename):\n raise PathException(\"Invalid filename %s\" % (filename, ))\n\n\ndef path(*s):\n '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''\n joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)\n absolute = os.path.abspath(joined)\n verify(absolute)\n return absolute\n\n\ndef get_bulk_archive(filenames):\n zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')\n with zipfile.ZipFile(zip_file, 'w') as zip:\n for filename in filenames:\n verify(filename)\n zip.write(filename, arcname=os.path.basename(filename))\n return zip_file\n\n\ndef log(msg):\n file(path('NOTES'), 'a').write(msg)\n", "path": "securedrop/store.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport config\nimport zipfile\nimport crypto_util\nimport uuid\nimport tempfile\n\nVALIDATE_FILENAME = re.compile(\n \"^(reply-)?[a-f0-9-]+(_msg|_doc\\.zip|)\\.gpg$\").match\n\n\nclass PathException(Exception):\n\n '''An exception raised by `store.verify` when it encounters a bad path. A path\n can be bad when it is not absolute, not normalized, not within\n `config.STORE_DIR`, or doesn't match the filename format.\n '''\n pass\n\n\ndef verify(p):\n '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and\n matches the filename format.\n '''\n if not os.path.isabs(config.STORE_DIR):\n raise PathException(\"config.STORE_DIR(%s) is not absolute\" % (\n config.STORE_DIR, ))\n\n # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to\n # '/bar', etc. We have to check that the path is normalized before checking\n # that it starts with the `config.STORE_DIR` or else a malicious actor could\n # append a bunch of '../../..' 
to access files outside of the store.\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n\n # Check that the path p is in config.STORE_DIR\n if os.path.relpath(p, config.STORE_DIR).startswith('..'):\n raise PathException(\"Invalid directory %s\" % (p, ))\n\n if os.path.isfile(p):\n filename = os.path.basename(p)\n ext = os.path.splitext(filename)[-1]\n if filename == '_FLAG':\n return True\n if ext != '.gpg':\n # if there's an extension, verify it's a GPG\n raise PathException(\"Invalid file extension %s\" % (ext, ))\n if not VALIDATE_FILENAME(filename):\n raise PathException(\"Invalid filename %s\" % (filename, ))\n\n\ndef path(*s):\n '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''\n joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)\n absolute = os.path.abspath(joined)\n verify(absolute)\n return absolute\n\n\ndef get_bulk_archive(filenames):\n zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')\n with zipfile.ZipFile(zip_file, 'w') as zip:\n for filename in filenames:\n verify(filename)\n zip.write(filename, arcname=os.path.basename(filename))\n return zip_file\n\n\ndef log(msg):\n file(path('NOTES'), 'a').write(msg)\n", "path": "securedrop/store.py"}]} | 1,222 | 208 |
gh_patches_debug_26522 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3741 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Assigning group members: memberlist batch navigation is broken.
## Group members listing: batch navigation and the `showAll` link are broken
### What I did:
Assign members to a group:
- click on "show all" in the user filter.
- if you have lots of users the list is batched
- click on the next batch page
### What I expect to happen:
the next user batch list is shown
### What actually happened:
the user list is empty
### What version of Plone/ Addons I am using:
Plone 6.0.2
### Additional
The "toggle all" checkboxes do not work. This can be solved with `pat-checklist` ...
--- END ISSUE ---
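The failing pre-patch condition can be shown against the form data a batch-navigation request carries (it sends `b_start` and `showAll` but presses no button); the form dict below is a hypothetical stand-in for the real request:
```python
# Pre-patch logic from update(), in isolation:
form = {'showAll': 'y', 'b_start': '30'}

find_all = form.get('form.button.FindAll') is not None  # False: no button pressed
search_string = form.get('searchstring', '')            # ''

print(find_all or bool(search_string))  # False -> searchResults stays [] -> empty page
```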
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py`
Content:
```
1 from Products.CMFCore.utils import getToolByName
2 from Products.CMFPlone import PloneMessageFactory as _
3 from Products.CMFPlone.controlpanel.browser.usergroups import (
4 UsersGroupsControlPanelView,
5 )
6 from Products.CMFPlone.utils import normalizeString
7 from zExceptions import Forbidden
8
9
10 class GroupMembershipControlPanel(UsersGroupsControlPanelView):
11
12 def update(self):
13 self.groupname = getattr(self.request, 'groupname')
14 self.gtool = getToolByName(self, 'portal_groups')
15 self.mtool = getToolByName(self, 'portal_membership')
16 self.group = self.gtool.getGroupById(self.groupname)
17 if self.group is None:
18 return
19
20 self.grouptitle = self.group.getGroupTitleOrName() or self.groupname
21
22 self.request.set('grouproles', self.group.getRoles()
23 if self.group else [])
24 self.canAddUsers = True
25 if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:
26 self.canAddUsers = False
27
28 self.groupquery = self.makeQuery(groupname=self.groupname)
29 self.groupkeyquery = self.makeQuery(key=self.groupname)
30
31 form = self.request.form
32 submitted = form.get('form.submitted', False)
33
34 self.searchResults = []
35 self.searchString = ''
36 self.newSearch = False
37
38 if submitted:
39 # add/delete before we search so we don't show stale results
40 toAdd = form.get('add', [])
41 if toAdd:
42 if not self.canAddUsers:
43 raise Forbidden
44
45 for u in toAdd:
46 self.gtool.addPrincipalToGroup(
47 u, self.groupname, self.request)
48 self.context.plone_utils.addPortalMessage(_('Changes made.'))
49
50 toDelete = form.get('delete', [])
51 if toDelete:
52 for u in toDelete:
53 self.gtool.removePrincipalFromGroup(
54 u, self.groupname, self.request)
55 self.context.plone_utils.addPortalMessage(_('Changes made.'))
56
57 search = form.get('form.button.Search', None) is not None
58 edit = form.get('form.button.Edit', None) is not None and toDelete
59 add = form.get('form.button.Add', None) is not None and toAdd
60 findAll = form.get('form.button.FindAll', None) is not None and \
61 not self.many_users
62 # The search string should be cleared when one of the
63 # non-search buttons has been clicked.
64 if findAll or edit or add:
65 form['searchstring'] = ''
66 self.searchString = form.get('searchstring', '')
67 if findAll or bool(self.searchString):
68 self.searchResults = self.getPotentialMembers(
69 self.searchString)
70
71 if search or findAll:
72 self.newSearch = True
73
74 self.groupMembers = self.getMembers()
75
76 def __call__(self):
77 self.update()
78 return self.index()
79
80 def isGroup(self, itemName):
81 return self.gtool.isGroup(itemName)
82
83 def getMembers(self):
84 searchResults = self.gtool.getGroupMembers(self.groupname)
85
86 groupResults = []
87 userResults = []
88 for principal_id in searchResults:
89 principal = self.gtool.getGroupById(principal_id)
90 if principal is not None:
91 groupResults.append(principal)
92 continue
93 principal = self.mtool.getMemberById(principal_id)
94 if principal is not None:
95 userResults.append(principal)
96
97 groupResults.sort(key=lambda x: normalizeString(x.getGroupTitleOrName()))
98 userResults.sort(key=lambda x: normalizeString(x.getProperty('fullname') or ''))
99
100 return groupResults + userResults
101
102 def getPotentialMembers(self, searchString):
103 ignoredUsersGroups = [
104 x.id for x in self.getMembers() + [self.group, ] if x is not None]
105 return self.membershipSearch(searchString, ignore=ignoredUsersGroups)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
Golden diff:

diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
@@ -57,14 +57,21 @@
search = form.get('form.button.Search', None) is not None
edit = form.get('form.button.Edit', None) is not None and toDelete
add = form.get('form.button.Add', None) is not None and toAdd
- findAll = form.get('form.button.FindAll', None) is not None and \
- not self.many_users
+ isBatched = form.get("b_start", None) is not None
+ findAll = (
+ form.get('form.button.FindAll', None) is not None
+ and not self.many_users
+ )
+ unbatchedAll = (
+ form.get("showAll", "") == "y"
+ and not self.many_users
+ )
# The search string should be cleared when one of the
# non-search buttons has been clicked.
- if findAll or edit or add:
+ if findAll or unbatchedAll or edit or add:
form['searchstring'] = ''
self.searchString = form.get('searchstring', '')
- if findAll or bool(self.searchString):
+ if findAll or isBatched or unbatchedAll or bool(self.searchString):
self.searchResults = self.getPotentialMembers(
self.searchString)
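The patched decision of when to (re)run the member search, pulled out as a pure function for illustration; the key names mirror the diff, and `many_users` stands in for the instance flag:
```python
def should_search(form, many_users, search_string):
    is_batched = form.get('b_start') is not None
    find_all = form.get('form.button.FindAll') is not None and not many_users
    unbatched_all = form.get('showAll', '') == 'y' and not many_users
    return find_all or is_batched or unbatched_all or bool(search_string)

# Batch navigation (b_start set, no button pressed) now triggers a search again;
# the pre-patch logic required FindAll or a search string, so it returned False here:
assert should_search({'b_start': '30', 'showAll': 'y'}, many_users=False, search_string='')
```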
| {"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n@@ -57,14 +57,21 @@\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n- findAll = form.get('form.button.FindAll', None) is not None and \\\n- not self.many_users\n+ isBatched = form.get(\"b_start\", None) is not None\n+ findAll = (\n+ form.get('form.button.FindAll', None) is not None\n+ and not self.many_users\n+ )\n+ unbatchedAll = (\n+ form.get(\"showAll\", \"\") == \"y\"\n+ and not self.many_users\n+ )\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n- if findAll or edit or add:\n+ if findAll or unbatchedAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n- if findAll or bool(self.searchString):\n+ if findAll or isBatched or unbatchedAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(\n self.searchString)\n", "issue": "Assigning group members: memberlist batch navigation is broken.\n## groupmembers listing batch and `showAll` link is broken\r\n\r\n### What I did:\r\n\r\nAssign members to a group:\r\n\r\n- click on \"show all\" in the user filter.\r\n- if you have lots of users the list is batched\r\n- click on the next batch page\r\n\r\n### What I expect to happen:\r\n\r\nthe next user batch list is shown\r\n\r\n### What actually happened:\r\n\r\nthe user list is empty\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nPlone 6.0.2\r\n\r\n\r\n### Additional\r\n\r\nThe \"toggle all\" checkboxes do not work. 
This can be solved with `pat-checklist` ...\n", "before_files": [{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass GroupMembershipControlPanel(UsersGroupsControlPanelView):\n\n def update(self):\n self.groupname = getattr(self.request, 'groupname')\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n if self.group is None:\n return\n\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n\n self.request.set('grouproles', self.group.getRoles()\n if self.group else [])\n self.canAddUsers = True\n if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:\n self.canAddUsers = False\n\n self.groupquery = self.makeQuery(groupname=self.groupname)\n self.groupkeyquery = self.makeQuery(key=self.groupname)\n\n form = self.request.form\n submitted = form.get('form.submitted', False)\n\n self.searchResults = []\n self.searchString = ''\n self.newSearch = False\n\n if submitted:\n # add/delete before we search so we don't show stale results\n toAdd = form.get('add', [])\n if toAdd:\n if not self.canAddUsers:\n raise Forbidden\n\n for u in toAdd:\n self.gtool.addPrincipalToGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n toDelete = form.get('delete', [])\n if toDelete:\n for u in toDelete:\n self.gtool.removePrincipalFromGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n findAll = form.get('form.button.FindAll', None) is not None and \\\n not self.many_users\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n if findAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n if findAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(\n self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groupMembers = self.getMembers()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def isGroup(self, itemName):\n return self.gtool.isGroup(itemName)\n\n def getMembers(self):\n searchResults = self.gtool.getGroupMembers(self.groupname)\n\n groupResults = []\n userResults = []\n for principal_id in searchResults:\n principal = self.gtool.getGroupById(principal_id)\n if principal is not None:\n groupResults.append(principal)\n continue\n principal = self.mtool.getMemberById(principal_id)\n if principal is not None:\n userResults.append(principal)\n\n groupResults.sort(key=lambda x: normalizeString(x.getGroupTitleOrName()))\n userResults.sort(key=lambda x: normalizeString(x.getProperty('fullname') or ''))\n\n return groupResults + userResults\n\n def getPotentialMembers(self, searchString):\n ignoredUsersGroups = [\n x.id for x in self.getMembers() + [self.group, ] if x is not None]\n return self.membershipSearch(searchString, ignore=ignoredUsersGroups)\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py"}], "after_files": 
[{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass GroupMembershipControlPanel(UsersGroupsControlPanelView):\n\n def update(self):\n self.groupname = getattr(self.request, 'groupname')\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n if self.group is None:\n return\n\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n\n self.request.set('grouproles', self.group.getRoles()\n if self.group else [])\n self.canAddUsers = True\n if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:\n self.canAddUsers = False\n\n self.groupquery = self.makeQuery(groupname=self.groupname)\n self.groupkeyquery = self.makeQuery(key=self.groupname)\n\n form = self.request.form\n submitted = form.get('form.submitted', False)\n\n self.searchResults = []\n self.searchString = ''\n self.newSearch = False\n\n if submitted:\n # add/delete before we search so we don't show stale results\n toAdd = form.get('add', [])\n if toAdd:\n if not self.canAddUsers:\n raise Forbidden\n\n for u in toAdd:\n self.gtool.addPrincipalToGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n toDelete = form.get('delete', [])\n if toDelete:\n for u in toDelete:\n self.gtool.removePrincipalFromGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n isBatched = form.get(\"b_start\", None) is not None\n findAll = (\n form.get('form.button.FindAll', None) is not None\n and not self.many_users\n )\n unbatchedAll = (\n form.get(\"showAll\", \"\") == \"y\"\n and not self.many_users\n )\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n if findAll or unbatchedAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n if findAll or isBatched or unbatchedAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(\n self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groupMembers = self.getMembers()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def isGroup(self, itemName):\n return self.gtool.isGroup(itemName)\n\n def getMembers(self):\n searchResults = self.gtool.getGroupMembers(self.groupname)\n\n groupResults = []\n userResults = []\n for principal_id in searchResults:\n principal = self.gtool.getGroupById(principal_id)\n if principal is not None:\n groupResults.append(principal)\n continue\n principal = self.mtool.getMemberById(principal_id)\n if principal is not None:\n userResults.append(principal)\n\n groupResults.sort(key=lambda x: normalizeString(x.getGroupTitleOrName()))\n userResults.sort(key=lambda x: normalizeString(x.getProperty('fullname') or ''))\n\n return groupResults + userResults\n\n def getPotentialMembers(self, searchString):\n ignoredUsersGroups = [\n x.id for x in self.getMembers() + [self.group, ] if x is not None]\n return self.membershipSearch(searchString, 
ignore=ignoredUsersGroups)\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py"}]} | 1,471 | 360 |
gh_patches_debug_3110 | rasdani/github-patches | git_diff | kserve__kserve-2018 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KServe 0.8 release tracking
/kind feature
**Describe the solution you'd like**
KServe 0.8 release tracking:
RC release Date: 12/30/2021
Release Date: 1/14/2022
KServe Model Serving:
- [x] torchserve v2 protocol
- https://github.com/kserve/kserve/pull/1870 @jagadeeshi2i
- [X] Transformer -> Predictor gRPC support
- https://github.com/kserve/kserve/pull/1933
- [X] MLServer 0.5 update
- https://github.com/kserve/kserve/pull/1853 @adriangonz
- [X] Scikit-Learn 1.0.1 and XGBoost 1.5.0 upgrade
- https://github.com/kserve/kserve/pull/1954 @yuzisun
- [X] Introduce ServingRuntime to single model serving @pvaneck @Suresh-Nakkeran
- https://github.com/kserve/kserve/pull/1901
- https://github.com/kserve/kserve/pull/1926
- [ ] Introduce new storage spec @Tomcli
- https://github.com/kserve/kserve/pull/1899
- [X] Storage initializer fixes
- https://github.com/kserve/kserve/pull/1883
- https://github.com/kserve/kserve/pull/1940
- [X] Helm chart for KServe and ModelMesh @yuzisun
- https://github.com/kserve/kserve/pull/1878
- [X] KServe SDK features and fixes
- https://github.com/kserve/kserve/pull/1949 @markwinter
- https://github.com/kserve/kserve/pull/1934 @markwinter
- https://github.com/kserve/kserve/pull/1918 @markwinter
ModelMesh:
- [X] Multi-namespace support for ModelMesh
- [X] Improve rest proxy support
- https://github.com/kserve/rest-proxy/pull/6
Models UI:
- [ ] Models Web App KServe migration @kimwnasptd
Website:
- [ ] Website doc update
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/setup.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-xdist',
20 'pytest-cov',
21 'pytest-asyncio',
22 'pytest-tornasync',
23 'mypy'
24 ]
25
26 with open('requirements.txt') as f:
27 REQUIRES = f.readlines()
28
29 setuptools.setup(
30 name='kserve',
31 version='0.8.0rc0',
32 author="The KServe Authors",
33 author_email='[email protected], [email protected], [email protected]',
34 license="Apache License Version 2.0",
35 url="https://github.com/kserve/kserve/tree/master/python/kserve",
36 description="KServe Python SDK",
37 long_description="Python SDK for KServe Server and Client.",
38 python_requires='>=3.6',
39 packages=[
40 'kserve',
41 'kserve.api',
42 'kserve.constants',
43 'kserve.models',
44 'kserve.handlers',
45 'kserve.utils',
46 ],
47 package_data={'': ['requirements.txt']},
48 include_package_data=True,
49 zip_safe=False,
50 classifiers=[
51 'Intended Audience :: Developers',
52 'Intended Audience :: Education',
53 'Intended Audience :: Science/Research',
54 'Programming Language :: Python :: 3',
55 'Programming Language :: Python :: 3.6',
56 'Programming Language :: Python :: 3.7',
57 "License :: OSI Approved :: Apache Software License",
58 "Operating System :: OS Independent",
59 'Topic :: Scientific/Engineering',
60 'Topic :: Scientific/Engineering :: Artificial Intelligence',
61 'Topic :: Software Development',
62 'Topic :: Software Development :: Libraries',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 ],
65 install_requires=REQUIRES,
66 tests_require=TESTS_REQUIRES,
67 extras_require={'test': TESTS_REQUIRES}
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
Golden diff:

diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -28,7 +28,7 @@
setuptools.setup(
name='kserve',
- version='0.8.0rc0',
+ version='0.8.0',
author="The KServe Authors",
author_email='[email protected], [email protected], [email protected]',
license="Apache License Version 2.0",
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -28,7 +28,7 @@\n \n setuptools.setup(\n name='kserve',\n- version='0.8.0rc0',\n+ version='0.8.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n", "issue": "KServe 0.8 release tracking\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKServe 0.8 release tracking:\r\nRC release Date: 12/30/2021\r\nRelease Date: 1/14/2021\r\n\r\nKServe Model Serving:\r\n- [x] torchserve v2 protocol\r\n - https://github.com/kserve/kserve/pull/1870 @jagadeeshi2i \r\n- [X] Transformer -> Predictor gRPC support\r\n - https://github.com/kserve/kserve/pull/1933\r\n- [X] MLServer 0.5 update\r\n - https://github.com/kserve/kserve/pull/1853 @adriangonz \r\n- [X] Scikit-Learn 1.0.1 and XGBoost 1.5.0 upgrade\r\n - https://github.com/kserve/kserve/pull/1954 @yuzisun \r\n- [X] Introduce ServingRuntime to single model serving @pvaneck @Suresh-Nakkeran \r\n - https://github.com/kserve/kserve/pull/1901\r\n - https://github.com/kserve/kserve/pull/1926\r\n- [ ] Introduce new storage spec @Tomcli \r\n - https://github.com/kserve/kserve/pull/1899\r\n- [X] Storage initializer fixes\r\n - https://github.com/kserve/kserve/pull/1883\r\n - https://github.com/kserve/kserve/pull/1940\r\n- [X] Helm chart for KServe and ModelMesh @yuzisun \r\n - https://github.com/kserve/kserve/pull/1878\r\n- [X] KServe SDK features and fixes\r\n - https://github.com/kserve/kserve/pull/1949 @markwinter \r\n - https://github.com/kserve/kserve/pull/1934 @markwinter \r\n - https://github.com/kserve/kserve/pull/1918 @markwinter \r\n\r\nModelMesh:\r\n- [X] Multi-namespace support for ModelMesh\r\n- [X] Improve rest proxy support\r\n - https://github.com/kserve/rest-proxy/pull/6\r\n\r\nModels UI:\r\n- [ ] Models Web App KServe migration @kimwnasptd \r\n \r\n \r\nWebsite: \r\n- [ ] Website doc update\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.8.0rc0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': 
['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.8.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,472 | 124 |
gh_patches_debug_22124 | rasdani/github-patches | git_diff | fossasia__open-event-server-5566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Session Export CSV does not include all data
The Session Export should include all available data fields, e.g.:
* Submission time
* All speakers
* Proposed length
* Type (Workshop, Talk)
* Level (e.g. Intermediate)
* Status (e.g. pending, accepted etc.)

--- END ISSUE ---
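A hedged sketch of one way `export_sessions_csv` could grow the requested columns. The attribute names marked as assumed (`submitted_at`, `level`, `session_type` and its `length`) are guesses about the Session model, not verified against the codebase:
```python
from app.models.helpers.versioning import strip_tags

def export_sessions_csv(sessions):
    headers = ['Session Title', 'Session Speakers', 'Session Track',
               'Session Abstract', 'Created At', 'Email Sent',
               'Submission Time', 'Proposed Length', 'Type', 'Level', 'Status']
    rows = [headers]
    for session in sessions:
        if session.deleted_at:
            continue
        speakers = '; '.join(s.name for s in session.speakers if s.name)
        rows.append([
            session.title or '',
            speakers,
            session.track.name if session.track and session.track.name else '',
            strip_tags(session.short_abstract) if session.short_abstract else '',
            str(session.created_at or ''),
            'Yes' if session.is_mail_sent else 'No',
            str(session.submitted_at or ''),                              # assumed field
            str(session.session_type.length) if session.session_type else '',  # assumed field
            session.session_type.name if session.session_type else '',   # Workshop / Talk
            session.level or '',                                         # e.g. Intermediate (assumed)
            session.state or '',                                         # pending / accepted / ...
        ])
    return rows
```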
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/csv_jobs_util.py`
Content:
```
1 from app.models.helpers.versioning import strip_tags
2
3
4 def export_orders_csv(orders):
5 headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',
6 'Discount Code', 'First Name', 'Last Name', 'Email']
7
8 rows = [headers]
9 for order in orders:
10 if order.status != "deleted":
11 column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',
12 str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',
13 str(order.amount) if order.amount else '', str(order.tickets_count),
14 str(order.discount_code.code) if order.discount_code else '',
15 str(order.user.first_name)
16 if order.user and order.user.first_name else '',
17 str(order.user.last_name)
18 if order.user and order.user.last_name else '',
19 str(order.user.email) if order.user and order.user.email else '']
20 rows.append(column)
21
22 return rows
23
24
25 def export_attendees_csv(attendees):
26 headers = ['Order#', 'Order Date', 'Status', 'First Name', 'Last Name', 'Email',
27 'Country', 'Payment Type', 'Ticket Name', 'Ticket Price', 'Ticket Type']
28
29 rows = [headers]
30 for attendee in attendees:
31 column = [str(attendee.order.get_invoice_number()) if attendee.order else '-',
32 str(attendee.order.created_at) if attendee.order and attendee.order.created_at else '-',
33 str(attendee.order.status) if attendee.order and attendee.order.status else '-',
34 str(attendee.firstname) if attendee.firstname else '',
35 str(attendee.lastname) if attendee.lastname else '',
36 str(attendee.email) if attendee.email else '',
37 str(attendee.country) if attendee.country else '',
38 str(attendee.order.payment_mode) if attendee.order and attendee.order.payment_mode else '',
39 str(attendee.ticket.name) if attendee.ticket and attendee.ticket.name else '',
40 str(attendee.ticket.price) if attendee.ticket and attendee.ticket.price else '0',
41 str(attendee.ticket.type) if attendee.ticket and attendee.ticket.type else '']
42
43 rows.append(column)
44
45 return rows
46
47
48 def export_sessions_csv(sessions):
49 headers = ['Session Title', 'Session Speakers',
50 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']
51 rows = [headers]
52 for session in sessions:
53 if not session.deleted_at:
54 column = [session.title + ' (' + session.state + ')' if session.title else '']
55 if session.speakers:
56 in_session = ''
57 for speaker in session.speakers:
58 if speaker.name:
59 in_session += (speaker.name + '; ')
60 column.append(in_session[:-2])
61 else:
62 column.append('')
63 column.append(session.track.name if session.track and session.track.name else '')
64 column.append(strip_tags(session.short_abstract) if session.short_abstract else '')
65 column.append(session.created_at if session.created_at else '')
66 column.append('Yes' if session.is_mail_sent else 'No')
67 rows.append(column)
68
69 return rows
70
71
72 def export_speakers_csv(speakers):
73 headers = ['Speaker Name', 'Speaker Email', 'Speaker Session(s)',
74 'Speaker Mobile', 'Speaker Bio', 'Speaker Organisation', 'Speaker Position']
75 rows = [headers]
76 for speaker in speakers:
77 column = [speaker.name if speaker.name else '', speaker.email if speaker.email else '']
78 if speaker.sessions:
79 session_details = ''
80 for session in speaker.sessions:
81 if not session.deleted_at:
82 session_details += session.title + ' (' + session.state + '); '
83 column.append(session_details[:-2])
84 else:
85 column.append('')
86 column.append(speaker.mobile if speaker.mobile else '')
87 column.append(speaker.short_biography if speaker.short_biography else '')
88 column.append(speaker.organisation if speaker.organisation else '')
89 column.append(speaker.position if speaker.position else '')
90 rows.append(column)
91
92 return rows
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py
--- a/app/api/helpers/csv_jobs_util.py
+++ b/app/api/helpers/csv_jobs_util.py
@@ -47,7 +47,8 @@
def export_sessions_csv(sessions):
headers = ['Session Title', 'Session Speakers',
- 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']
+ 'Session Track', 'Session Abstract', 'Created At', 'Email Sent',
+ 'Level', 'Status', 'Session Type', 'Talk Length']
rows = [headers]
for session in sessions:
if not session.deleted_at:
@@ -64,6 +65,10 @@
column.append(strip_tags(session.short_abstract) if session.short_abstract else '')
column.append(session.created_at if session.created_at else '')
column.append('Yes' if session.is_mail_sent else 'No')
+ column.append(session.level)
+ column.append(session.state)
+ column.append(session.type)
+ column.append(len(session.long_abstract))
rows.append(column)
return rows
| {"golden_diff": "diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py\n--- a/app/api/helpers/csv_jobs_util.py\n+++ b/app/api/helpers/csv_jobs_util.py\n@@ -47,7 +47,8 @@\n \n def export_sessions_csv(sessions):\n headers = ['Session Title', 'Session Speakers',\n- 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']\n+ 'Session Track', 'Session Abstract', 'Created At', 'Email Sent',\n+ 'Level', 'Status', 'Session Type', 'Talk Length']\n rows = [headers]\n for session in sessions:\n if not session.deleted_at:\n@@ -64,6 +65,10 @@\n column.append(strip_tags(session.short_abstract) if session.short_abstract else '')\n column.append(session.created_at if session.created_at else '')\n column.append('Yes' if session.is_mail_sent else 'No')\n+ column.append(session.level)\n+ column.append(session.state)\n+ column.append(session.type)\n+ column.append(len(session.long_abstract))\n rows.append(column)\n \n return rows\n", "issue": "Session Export CSV does not include all data \nThe Session Export should export all data sets that are available e.g. including:\r\n* Submission time\r\n* All speakers\r\n* Proposed length\r\n* Type (Workshop, Talk)\r\n* Level (e.g. Intermediate)\r\n* Status (e.g. pending, accepted etc.)\r\n\r\n\n", "before_files": [{"content": "from app.models.helpers.versioning import strip_tags\n\n\ndef export_orders_csv(orders):\n headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',\n 'Discount Code', 'First Name', 'Last Name', 'Email']\n\n rows = [headers]\n for order in orders:\n if order.status != \"deleted\":\n column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.tickets_count),\n str(order.discount_code.code) if order.discount_code else '',\n str(order.user.first_name)\n if order.user and order.user.first_name else '',\n str(order.user.last_name)\n if order.user and order.user.last_name else '',\n str(order.user.email) if order.user and order.user.email else '']\n rows.append(column)\n\n return rows\n\n\ndef export_attendees_csv(attendees):\n headers = ['Order#', 'Order Date', 'Status', 'First Name', 'Last Name', 'Email',\n 'Country', 'Payment Type', 'Ticket Name', 'Ticket Price', 'Ticket Type']\n\n rows = [headers]\n for attendee in attendees:\n column = [str(attendee.order.get_invoice_number()) if attendee.order else '-',\n str(attendee.order.created_at) if attendee.order and attendee.order.created_at else '-',\n str(attendee.order.status) if attendee.order and attendee.order.status else '-',\n str(attendee.firstname) if attendee.firstname else '',\n str(attendee.lastname) if attendee.lastname else '',\n str(attendee.email) if attendee.email else '',\n str(attendee.country) if attendee.country else '',\n str(attendee.order.payment_mode) if attendee.order and attendee.order.payment_mode else '',\n str(attendee.ticket.name) if attendee.ticket and attendee.ticket.name else '',\n str(attendee.ticket.price) if attendee.ticket and attendee.ticket.price else '0',\n str(attendee.ticket.type) if attendee.ticket and attendee.ticket.type else '']\n\n rows.append(column)\n\n return rows\n\n\ndef export_sessions_csv(sessions):\n headers = ['Session Title', 'Session Speakers',\n 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']\n rows = [headers]\n for session in sessions:\n if not session.deleted_at:\n column = 
[session.title + ' (' + session.state + ')' if session.title else '']\n if session.speakers:\n in_session = ''\n for speaker in session.speakers:\n if speaker.name:\n in_session += (speaker.name + '; ')\n column.append(in_session[:-2])\n else:\n column.append('')\n column.append(session.track.name if session.track and session.track.name else '')\n column.append(strip_tags(session.short_abstract) if session.short_abstract else '')\n column.append(session.created_at if session.created_at else '')\n column.append('Yes' if session.is_mail_sent else 'No')\n rows.append(column)\n\n return rows\n\n\ndef export_speakers_csv(speakers):\n headers = ['Speaker Name', 'Speaker Email', 'Speaker Session(s)',\n 'Speaker Mobile', 'Speaker Bio', 'Speaker Organisation', 'Speaker Position']\n rows = [headers]\n for speaker in speakers:\n column = [speaker.name if speaker.name else '', speaker.email if speaker.email else '']\n if speaker.sessions:\n session_details = ''\n for session in speaker.sessions:\n if not session.deleted_at:\n session_details += session.title + ' (' + session.state + '); '\n column.append(session_details[:-2])\n else:\n column.append('')\n column.append(speaker.mobile if speaker.mobile else '')\n column.append(speaker.short_biography if speaker.short_biography else '')\n column.append(speaker.organisation if speaker.organisation else '')\n column.append(speaker.position if speaker.position else '')\n rows.append(column)\n\n return rows\n", "path": "app/api/helpers/csv_jobs_util.py"}], "after_files": [{"content": "from app.models.helpers.versioning import strip_tags\n\n\ndef export_orders_csv(orders):\n headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',\n 'Discount Code', 'First Name', 'Last Name', 'Email']\n\n rows = [headers]\n for order in orders:\n if order.status != \"deleted\":\n column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.tickets_count),\n str(order.discount_code.code) if order.discount_code else '',\n str(order.user.first_name)\n if order.user and order.user.first_name else '',\n str(order.user.last_name)\n if order.user and order.user.last_name else '',\n str(order.user.email) if order.user and order.user.email else '']\n rows.append(column)\n\n return rows\n\n\ndef export_attendees_csv(attendees):\n headers = ['Order#', 'Order Date', 'Status', 'First Name', 'Last Name', 'Email',\n 'Country', 'Payment Type', 'Ticket Name', 'Ticket Price', 'Ticket Type']\n\n rows = [headers]\n for attendee in attendees:\n column = [str(attendee.order.get_invoice_number()) if attendee.order else '-',\n str(attendee.order.created_at) if attendee.order and attendee.order.created_at else '-',\n str(attendee.order.status) if attendee.order and attendee.order.status else '-',\n str(attendee.firstname) if attendee.firstname else '',\n str(attendee.lastname) if attendee.lastname else '',\n str(attendee.email) if attendee.email else '',\n str(attendee.country) if attendee.country else '',\n str(attendee.order.payment_mode) if attendee.order and attendee.order.payment_mode else '',\n str(attendee.ticket.name) if attendee.ticket and attendee.ticket.name else '',\n str(attendee.ticket.price) if attendee.ticket and attendee.ticket.price else '0',\n str(attendee.ticket.type) if attendee.ticket and attendee.ticket.type else '']\n\n rows.append(column)\n\n return rows\n\n\ndef 
export_sessions_csv(sessions):\n headers = ['Session Title', 'Session Speakers',\n 'Session Track', 'Session Abstract', 'Created At', 'Email Sent',\n 'Level', 'Status', 'Session Type', 'Talk Length']\n rows = [headers]\n for session in sessions:\n if not session.deleted_at:\n column = [session.title + ' (' + session.state + ')' if session.title else '']\n if session.speakers:\n in_session = ''\n for speaker in session.speakers:\n if speaker.name:\n in_session += (speaker.name + '; ')\n column.append(in_session[:-2])\n else:\n column.append('')\n column.append(session.track.name if session.track and session.track.name else '')\n column.append(strip_tags(session.short_abstract) if session.short_abstract else '')\n column.append(session.created_at if session.created_at else '')\n column.append('Yes' if session.is_mail_sent else 'No')\n column.append(session.level)\n column.append(session.state)\n column.append(session.type)\n column.append(len(session.long_abstract))\n rows.append(column)\n\n return rows\n\n\ndef export_speakers_csv(speakers):\n headers = ['Speaker Name', 'Speaker Email', 'Speaker Session(s)',\n 'Speaker Mobile', 'Speaker Bio', 'Speaker Organisation', 'Speaker Position']\n rows = [headers]\n for speaker in speakers:\n column = [speaker.name if speaker.name else '', speaker.email if speaker.email else '']\n if speaker.sessions:\n session_details = ''\n for session in speaker.sessions:\n if not session.deleted_at:\n session_details += session.title + ' (' + session.state + '); '\n column.append(session_details[:-2])\n else:\n column.append('')\n column.append(speaker.mobile if speaker.mobile else '')\n column.append(speaker.short_biography if speaker.short_biography else '')\n column.append(speaker.organisation if speaker.organisation else '')\n column.append(speaker.position if speaker.position else '')\n rows.append(column)\n\n return rows\n", "path": "app/api/helpers/csv_jobs_util.py"}]} | 1,444 | 237 |
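A framework-free sketch of the pattern the golden diff above applies: when a CSV exporter grows new columns, the header row and the per-row appends must be extended together or the columns drift out of alignment. The flat `Session` stand-in and its field names are assumptions for illustration; the real code iterates Django model instances and also guards `deleted_at`, speaker lists, and HTML stripping, all omitted here:

```python
from types import SimpleNamespace

def export_sessions_csv(sessions):
    # New headers and new row fields are appended in the same order, so
    # each CSV column stays aligned with its value.
    headers = ['Session Title', 'Session Track', 'Created At', 'Email Sent',
               'Level', 'Status', 'Session Type', 'Talk Length']
    rows = [headers]
    for session in sessions:
        rows.append([
            session.title or '',
            session.track or '',
            session.created_at or '',
            'Yes' if session.is_mail_sent else 'No',
            session.level,                # the four columns added by the patch
            session.state,
            session.type,
            len(session.long_abstract),   # proposed length proxied by abstract size
        ])
    return rows

# Stand-in record; the real exporter receives Django model instances.
demo = SimpleNamespace(title='Intro to CSV', track='Data', created_at='2019-01-01',
                       is_mail_sent=True, level='Intermediate', state='accepted',
                       type='Talk', long_abstract='A longer abstract...')
print(export_sessions_csv([demo]))
```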
gh_patches_debug_10825 | rasdani/github-patches | git_diff | chainer__chainer-601 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
chainer.functions.Parameter cannot accept cupy.ndarray
```
In [1]: import numpy, chainer, cupy
In [2]: p = chainer.functions.Parameter(numpy.arange(12, dtype=numpy.float32))
In [3]: p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-3bee41ef9fca> in <module>()
----> 1 p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))
/home/delta/dev/chainer2/chainer/functions/connection/parameter.py in __init__(self, array)
21 def __init__(self, array):
22 self.W = array
---> 23 self.gW = numpy.full_like(array, numpy.nan)
24
25 def __call__(self, volatile=False):
/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/core/numeric.pyc in full_like(a, fill_value, dtype, order, subok)
344
345 """
--> 346 res = empty_like(a, dtype=dtype, order=order, subok=subok)
347 multiarray.copyto(res, fill_value, casting='unsafe')
348 return res
ValueError: object __array__ method not producing an array
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/connection/parameter.py`
Content:
```
1 import numpy
2
3 from chainer import function
4 from chainer.utils import type_check
5
6
7 class Parameter(function.Function):
8
9 """Function that outputs its weight array.
10
11 This is a parameterized function that takes no input and returns a variable
12 holding a shallow copy of the parameter array.
13
14 Args:
15 array: Initial parameter array.
16
17 """
18 parameter_names = 'W',
19 gradient_names = 'gW',
20
21 def __init__(self, array):
22 self.W = array
23 self.gW = numpy.full_like(array, numpy.nan)
24
25 def __call__(self, volatile=False):
26 ret = super(Parameter, self).__call__()
27 if volatile:
28 ret.unchain_backward()
29 ret.volatile = volatile
30 return ret
31
32 def check_type_forward(self, in_types):
33 type_check.expect(in_types.size() == 0)
34
35 def forward(self, x):
36 return self.W,
37
38 def backward(self, x, gy):
39 self.gW += gy[0]
40 return ()
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/connection/parameter.py b/chainer/functions/connection/parameter.py
--- a/chainer/functions/connection/parameter.py
+++ b/chainer/functions/connection/parameter.py
@@ -1,5 +1,6 @@
import numpy
+from chainer import cuda
from chainer import function
from chainer.utils import type_check
@@ -20,7 +21,8 @@
def __init__(self, array):
self.W = array
- self.gW = numpy.full_like(array, numpy.nan)
+ xp = cuda.get_array_module(array)
+ self.gW = xp.full_like(self.W, numpy.nan)
def __call__(self, volatile=False):
ret = super(Parameter, self).__call__()
| {"golden_diff": "diff --git a/chainer/functions/connection/parameter.py b/chainer/functions/connection/parameter.py\n--- a/chainer/functions/connection/parameter.py\n+++ b/chainer/functions/connection/parameter.py\n@@ -1,5 +1,6 @@\n import numpy\n \n+from chainer import cuda\n from chainer import function\n from chainer.utils import type_check\n \n@@ -20,7 +21,8 @@\n \n def __init__(self, array):\n self.W = array\n- self.gW = numpy.full_like(array, numpy.nan)\n+ xp = cuda.get_array_module(array)\n+ self.gW = xp.full_like(self.W, numpy.nan)\n \n def __call__(self, volatile=False):\n ret = super(Parameter, self).__call__()\n", "issue": "chainer.functions.Parameter cannot accept cupy.ndarray\n```\nIn [1]: import numpy, chainer, cupy\nIn [2]: p = chainer.functions.Parameter(numpy.arange(12, dtype=numpy.float32))\nIn [3]: p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))\n---------------------------------------------------------------------------\nValueError Traceback (most recent call last)\n<ipython-input-3-3bee41ef9fca> in <module>()\n----> 1 p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))\n\n/home/delta/dev/chainer2/chainer/functions/connection/parameter.py in __init__(self, array)\n 21 def __init__(self, array):\n 22 self.W = array\n---> 23 self.gW = numpy.full_like(array, numpy.nan)\n 24 \n 25 def __call__(self, volatile=False):\n\n/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/core/numeric.pyc in full_like(a, fill_value, dtype, order, subok)\n 344 \n 345 \"\"\"\n--> 346 res = empty_like(a, dtype=dtype, order=order, subok=subok)\n 347 multiarray.copyto(res, fill_value, casting='unsafe')\n 348 return res\n\nValueError: object __array__ method not producing an array\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Parameter(function.Function):\n\n \"\"\"Function that outputs its weight array.\n\n This is a parameterized function that takes no input and returns a variable\n holding a shallow copy of the parameter array.\n\n Args:\n array: Initial parameter array.\n\n \"\"\"\n parameter_names = 'W',\n gradient_names = 'gW',\n\n def __init__(self, array):\n self.W = array\n self.gW = numpy.full_like(array, numpy.nan)\n\n def __call__(self, volatile=False):\n ret = super(Parameter, self).__call__()\n if volatile:\n ret.unchain_backward()\n ret.volatile = volatile\n return ret\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 0)\n\n def forward(self, x):\n return self.W,\n\n def backward(self, x, gy):\n self.gW += gy[0]\n return ()\n", "path": "chainer/functions/connection/parameter.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Parameter(function.Function):\n\n \"\"\"Function that outputs its weight array.\n\n This is a parameterized function that takes no input and returns a variable\n holding a shallow copy of the parameter array.\n\n Args:\n array: Initial parameter array.\n\n \"\"\"\n parameter_names = 'W',\n gradient_names = 'gW',\n\n def __init__(self, array):\n self.W = array\n xp = cuda.get_array_module(array)\n self.gW = xp.full_like(self.W, numpy.nan)\n\n def __call__(self, volatile=False):\n ret = super(Parameter, self).__call__()\n if volatile:\n ret.unchain_backward()\n ret.volatile = volatile\n return ret\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 
0)\n\n def forward(self, x):\n return self.W,\n\n def backward(self, x, gy):\n self.gW += gy[0]\n return ()\n", "path": "chainer/functions/connection/parameter.py"}]} | 892 | 162 |
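The fix above replaces `numpy.full_like`, which chokes on device arrays, with an allocation dispatched through `chainer.cuda.get_array_module`. A self-contained sketch of that dispatch pattern; the local `get_array_module` is a simplified stand-in for chainer's helper, and the cupy import is optional so the snippet also runs on CPU-only machines:

```python
import numpy

def get_array_module(array):
    # Return the array library (numpy or cupy) that owns `array`.
    # cupy.get_array_module performs exactly this check when available.
    try:
        import cupy
        return cupy.get_array_module(array)
    except ImportError:
        return numpy

def make_gradient_buffer(array):
    # Allocate the NaN-filled gradient buffer with the same library, and
    # therefore on the same device, as the parameter array itself.
    xp = get_array_module(array)
    return xp.full_like(array, numpy.nan)

print(make_gradient_buffer(numpy.arange(12, dtype=numpy.float32)))
```

Called with a `cupy.ndarray`, the same two lines allocate on the GPU instead of failing inside numpy.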
gh_patches_debug_19977 | rasdani/github-patches | git_diff | uclapi__uclapi-1219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data exposed by webhooks, not shown by /bookings
An example is Gordon St (22) 4.01, which is provided by webhooks when bookings change but is not normally returned by /bookings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/uclapi/roombookings/management/commands/trigger_webhooks.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from roombookings.models import BookingA, BookingB
3 from timetable.models import Lock
4 from roombookings.helpers import _serialize_bookings
5 from dashboard.models import Webhook, WebhookTriggerHistory
6 from datetime import datetime
7 from deepdiff import DeepDiff
8 from django.utils import timezone
9 from requests_futures.sessions import FuturesSession
10
11
12 class Command(BaseCommand):
13
14 help = 'Diff roombooking result sets and notify relevant webhooks'
15
16 def add_arguments(self, parser):
17 parser.add_argument(
18 '--debug',
19 action='store_true',
20 dest='debug',
21 help='Print webhook responses',
22 )
23
24 def handle(self, *args, **options):
25 self.stdout.write("Triggering webhooks")
26 session = FuturesSession()
27
28 # currently not locked table is the old one, more recent one is locked
29 lock = Lock.objects.all()[0] # there is only ever one lock
30
31 if not lock.a:
32 old_booking_table = BookingA
33 new_booking_table = BookingB
34 else:
35 old_booking_table = BookingB
36 new_booking_table = BookingA
37
38 now = datetime.now()
39
40 old_bookings = _serialize_bookings(
41 old_booking_table.objects.filter(
42 startdatetime__gt=now
43 )
44 )
45 new_bookings = _serialize_bookings(
46 new_booking_table.objects.filter(
47 startdatetime__gt=now
48 )
49 )
50
51 ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)
52
53 webhooks = Webhook.objects.filter(app__deleted=False)
54 # assumption: list of webhooks will be longer than ddiff
55
56 num_bookings_added = 0
57 num_bookings_removed = 0
58 if "iterable_item_added" in ddiff:
59 num_bookings_added = len(
60 ddiff["iterable_item_added"].values()
61 )
62
63 if "iterable_item_removed" in ddiff:
64 num_bookings_removed = len(
65 ddiff["iterable_item_removed"].values()
66 )
67
68 self.stdout.write(
69 "{} bookings added\n{} bookings removed.".format(
70 num_bookings_added,
71 num_bookings_removed
72 )
73 )
74
75 def webhook_map(webhook):
76 def webhook_filter(booking):
77 return (
78 (
79 webhook.siteid == '' or
80 booking["siteid"] == webhook.siteid
81 ) and
82 (
83 webhook.roomid == '' or
84 booking["roomid"] == webhook.roomid
85 ) and
86 (
87 webhook.contact == '' or
88 # mimick SQL 'like'
89 webhook.contact in str(booking["contact"])
90 )
91 )
92 output = {
93 "webhook_in_db": webhook,
94 "url": webhook.url,
95 "verification_secret": webhook.verification_secret
96 }
97 if "iterable_item_added" in ddiff:
98 bookings_added = list(filter(
99 webhook_filter, ddiff["iterable_item_added"].values()
100 ))
101 if bookings_added != []:
102 output["bookings_added"] = bookings_added
103 if "iterable_item_removed" in ddiff:
104 bookings_removed = list(filter(
105 webhook_filter, ddiff["iterable_item_removed"].values()
106 ))
107 if bookings_removed != []:
108 output["bookings_removed"] = bookings_removed
109
110 return output
111
112 webhooks_to_enact = list(map(webhook_map, webhooks))
113
114 unsent_requests = []
115 for idx, webhook in enumerate(webhooks_to_enact):
116 payload = {
117 "service": "roombookings",
118 "name": "bookings_changed",
119 "verification_secret": webhook["verification_secret"],
120 "content": {}
121 }
122
123 if "bookings_added" in webhook:
124 payload["content"]["bookings_added"] = (
125 webhook["bookings_added"]
126 )
127 if "bookings_removed" in webhook:
128 payload["content"]["bookings_removed"] = (
129 webhook["bookings_removed"]
130 )
131
132 webhooks_to_enact[idx]["payload"] = payload
133
134 if payload["content"] != {} and webhook["url"] != "":
135 unsent_requests.append(
136 session.post(
137 webhook["url"], json=payload,
138 headers={
139 "User-Agent": "uclapi-bot/1"
140 }
141 )
142 )
143 self.stdout.write(
144 "Triggering {} webhooks.".format(len(unsent_requests))
145 )
146 if("debug" in options):
147 for i in unsent_requests:
148 self.stdout.write(
149 'response status {0}'.format(i.result().status_code)
150 )
151
152 for webhook in webhooks_to_enact:
153 if webhook["payload"]["content"] != {}:
154 webhook_in_db = webhook["webhook_in_db"]
155 webhook_in_db.last_fired = timezone.now()
156 webhook_in_db.save()
157
158 new_webhook_history_entry = WebhookTriggerHistory(
159 webhook=webhook_in_db,
160 payload=webhook["payload"]
161 )
162 new_webhook_history_entry.save()
163
164 self.stdout.write("Webhooks triggered.")
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -7,6 +7,7 @@
from deepdiff import DeepDiff
from django.utils import timezone
from requests_futures.sessions import FuturesSession
+from django.db.models import Q
class Command(BaseCommand):
@@ -39,11 +40,13 @@
old_bookings = _serialize_bookings(
old_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
new_bookings = _serialize_bookings(
new_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
| {"golden_diff": "diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n@@ -7,6 +7,7 @@\n from deepdiff import DeepDiff\n from django.utils import timezone\n from requests_futures.sessions import FuturesSession\n+from django.db.models import Q\n \n \n class Command(BaseCommand):\n@@ -39,11 +40,13 @@\n \n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n", "issue": "Data exposed by webhooks, not shown by /bookings\nAn example is Gordon St (22) 4.01 . which is provided by webhooks when bookings change but we do not return it for bookings usually.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import BookingA, BookingB\nfrom timetable.models import Lock\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nfrom django.utils import timezone\nfrom requests_futures.sessions import FuturesSession\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n help='Print webhook responses',\n )\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n session = FuturesSession()\n\n # currently not locked table is the old one, more recent one is locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if not lock.a:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.filter(app__deleted=False)\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = 
list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n session.post(\n webhook[\"url\"], json=payload,\n headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n if(\"debug\" in options):\n for i in unsent_requests:\n self.stdout.write(\n 'response status {0}'.format(i.result().status_code)\n )\n\n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import BookingA, BookingB\nfrom timetable.models import Lock\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nfrom django.utils import timezone\nfrom requests_futures.sessions import FuturesSession\nfrom django.db.models import Q\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n help='Print webhook responses',\n )\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n session = FuturesSession()\n\n # currently not locked table is the old one, more recent one is locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if not lock.a:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.filter(app__deleted=False)\n # assumption: list of webhooks will be longer than ddiff\n\n 
num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n session.post(\n webhook[\"url\"], json=payload,\n headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n if(\"debug\" in options):\n for i in unsent_requests:\n self.stdout.write(\n 'response status {0}'.format(i.result().status_code)\n )\n\n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}]} | 1,790 | 253 |
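The patch above narrows both result sets with OR-combined `Q` objects so the diff only covers centrally bookable rooms plus the two extra sites. One detail worth spelling out: `Q` objects are passed positionally, and Python requires positional arguments before keyword ones, which is why the `Q(...) | Q(...)` expression leads the `filter()` call. A minimal sketch (requires Django installed; only the expression is built here, since evaluating it would need the project's models and database, and the record does not say what site ids 238 and 240 refer to):

```python
from django.db.models import Q

def bookable_rooms_q():
    # OR-combined restriction applied to both the old and new booking tables.
    return Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240')

# Applied alongside the existing time filter, Q first, keywords after:
#   table.objects.filter(bookable_rooms_q(), startdatetime__gt=now)
print(bookable_rooms_q())
# roughly: (OR: ('bookabletype', 'CB'), ('siteid', '238'), ('siteid', '240'))
```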
gh_patches_debug_24551 | rasdani/github-patches | git_diff | opsdroid__opsdroid-41 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate default config
It should be possible to generate some basic config with a command line flag to opsdroid. It should cause opsdroid to print out the config so that it can be piped into a file.
e.g
```
opsdroid --gen-config > configuration.yaml
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/__main__.py`
Content:
```
1 """Starts opsdroid."""
2
3 import logging
4
5 from opsdroid.loader import Loader
6 from opsdroid.core import OpsDroid
7 from opsdroid.helper import set_logging_level
8 from opsdroid.const import LOG_FILENAME
9
10
11 def main():
12 """The main function."""
13 logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
14 logging.info("="*40)
15 logging.info("Stated application")
16 with OpsDroid() as opsdroid:
17 loader = Loader(opsdroid)
18 opsdroid.config = loader.load_config_file([
19 "./configuration.yaml",
20 "~/.opsdroid/configuration.yaml",
21 "/etc/opsdroid/configuration.yaml"
22 ])
23 if "logging" in opsdroid.config:
24 set_logging_level(opsdroid.config['logging'])
25 loader.load_config(opsdroid.config)
26 opsdroid.exit()
27
28 if __name__ == "__main__":
29 main()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -1,6 +1,9 @@
"""Starts opsdroid."""
+import sys
+import os
import logging
+import argparse
from opsdroid.loader import Loader
from opsdroid.core import OpsDroid
@@ -8,11 +11,30 @@
from opsdroid.const import LOG_FILENAME
+def parse_args(args):
+ """Parse command line arguments."""
+ parser = argparse.ArgumentParser(description='Run opsdroid.')
+ parser.add_argument('--gen-config', action="store_true",
+ help='prints out an example configuration file')
+ return parser.parse_args(args)
+
+
def main():
"""The main function."""
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.info("="*40)
logging.info("Stated application")
+
+ args = parse_args(sys.argv[1:])
+
+ if args.gen_config:
+ path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "configuration/example_configuration.yaml")
+ with open(path, 'r') as conf:
+ print(conf.read())
+ sys.exit(0)
+
with OpsDroid() as opsdroid:
loader = Loader(opsdroid)
opsdroid.config = loader.load_config_file([
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -1,6 +1,9 @@\n \"\"\"Starts opsdroid.\"\"\"\n \n+import sys\n+import os\n import logging\n+import argparse\n \n from opsdroid.loader import Loader\n from opsdroid.core import OpsDroid\n@@ -8,11 +11,30 @@\n from opsdroid.const import LOG_FILENAME\n \n \n+def parse_args(args):\n+ \"\"\"Parse command line arguments.\"\"\"\n+ parser = argparse.ArgumentParser(description='Run opsdroid.')\n+ parser.add_argument('--gen-config', action=\"store_true\",\n+ help='prints out an example configuration file')\n+ return parser.parse_args(args)\n+\n+\n def main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n+\n+ args = parse_args(sys.argv[1:])\n+\n+ if args.gen_config:\n+ path = os.path.join(\n+ os.path.dirname(os.path.abspath(__file__)),\n+ \"configuration/example_configuration.yaml\")\n+ with open(path, 'r') as conf:\n+ print(conf.read())\n+ sys.exit(0)\n+\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n", "issue": "Generate default config\nIt should be possible to generate some basic config with a command line flag to opsdroid. It should cause opsdroid to print out the config so that is can be piped into a file.\n\ne.g\n\n```\nopsdroid --gen-config > configuration.yaml\n```\n\n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport logging\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n \"./configuration.yaml\",\n \"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport sys\nimport os\nimport logging\nimport argparse\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n with open(path, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n \"./configuration.yaml\",\n \"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n if 
\"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]} | 570 | 322 |
gh_patches_debug_35552 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Profiler: Automatically fill `tag` in `record`?
Maybe we can use the caller's function name (`inspect.stack()`) if tag is not given.
TODO: Need to measure overhead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/profiler/_record.py`
Content:
```
1 from contextlib import contextmanager
2 from typing import Any, Callable, Generator, Iterable, Optional, TypeVar
3
4 import torch
5
6 from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification
7
8
9 @contextmanager
10 def record(
11 tag: str,
12 metric: Optional[str] = None,
13 use_cuda: bool = False,
14 ) -> Generator[_ReportNotification, None, None]:
15 if metric is None:
16 metric = tag
17
18 if use_cuda:
19 torch.cuda.nvtx.range_push(tag) # type: ignore[no-untyped-call]
20 try:
21 with torch.autograd.profiler.record_function(tag):
22 with time_summary.report(metric, use_cuda) as ntf:
23 yield ntf
24 finally:
25 if use_cuda:
26 torch.cuda.nvtx.range_pop() # type: ignore[no-untyped-call]
27
28
29 _T = TypeVar('_T')
30
31
32 def record_function(
33 tag: str,
34 use_cuda: bool = False,
35 ) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
36 def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:
37 def wrapped(*args: Any, **kwargs: Any) -> _T:
38 with record(tag, use_cuda=use_cuda):
39 return f(*args, **kwargs)
40
41 return wrapped
42
43 return wrapper
44
45
46 def record_iterable(
47 tag: str,
48 iter: Iterable[_T],
49 divide_metric: bool = False,
50 use_cuda: bool = False,
51 ) -> Iterable[_T]:
52 def wrapped() -> Iterable[_T]:
53 for i, x in enumerate(iter):
54 name = f"{tag}-{i}"
55 metric = name if divide_metric else tag
56 with record(name, metric, use_cuda=use_cuda):
57 yield x
58
59 return wrapped()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_pfn_extras/profiler/_record.py b/pytorch_pfn_extras/profiler/_record.py
--- a/pytorch_pfn_extras/profiler/_record.py
+++ b/pytorch_pfn_extras/profiler/_record.py
@@ -1,17 +1,35 @@
from contextlib import contextmanager
+import inspect
from typing import Any, Callable, Generator, Iterable, Optional, TypeVar
+import types
import torch
from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification
+def _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:
+ for _ in range(depth):
+ assert frame is not None
+ frame = frame.f_back
+ assert frame is not None
+ frame_info = inspect.getframeinfo(frame, context=0)
+ return '{}:{}:{}'.format(
+ inspect.getmodulename(frame_info.filename),
+ frame_info.lineno,
+ frame_info.function,
+ )
+
+
@contextmanager
def record(
- tag: str,
+ tag: Optional[str],
metric: Optional[str] = None,
use_cuda: bool = False,
) -> Generator[_ReportNotification, None, None]:
+ if tag is None:
+ tag = _infer_tag_name(inspect.currentframe(), depth=2)
+
if metric is None:
metric = tag
@@ -30,12 +48,12 @@
def record_function(
- tag: str,
+ tag: Optional[str],
use_cuda: bool = False,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:
def wrapped(*args: Any, **kwargs: Any) -> _T:
- with record(tag, use_cuda=use_cuda):
+ with record(tag or f.__name__, use_cuda=use_cuda):
return f(*args, **kwargs)
return wrapped
@@ -44,11 +62,14 @@
def record_iterable(
- tag: str,
- iter: Iterable[_T],
- divide_metric: bool = False,
- use_cuda: bool = False,
+ tag: Optional[str],
+ iter: Iterable[_T],
+ divide_metric: bool = False,
+ use_cuda: bool = False,
) -> Iterable[_T]:
+ if tag is None:
+ tag = _infer_tag_name(inspect.currentframe(), depth=1)
+
def wrapped() -> Iterable[_T]:
for i, x in enumerate(iter):
name = f"{tag}-{i}"
| {"golden_diff": "diff --git a/pytorch_pfn_extras/profiler/_record.py b/pytorch_pfn_extras/profiler/_record.py\n--- a/pytorch_pfn_extras/profiler/_record.py\n+++ b/pytorch_pfn_extras/profiler/_record.py\n@@ -1,17 +1,35 @@\n from contextlib import contextmanager\n+import inspect\n from typing import Any, Callable, Generator, Iterable, Optional, TypeVar\n+import types\n \n import torch\n \n from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification\n \n \n+def _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:\n+ for _ in range(depth):\n+ assert frame is not None\n+ frame = frame.f_back\n+ assert frame is not None\n+ frame_info = inspect.getframeinfo(frame, context=0)\n+ return '{}:{}:{}'.format(\n+ inspect.getmodulename(frame_info.filename),\n+ frame_info.lineno,\n+ frame_info.function,\n+ )\n+\n+\n @contextmanager\n def record(\n- tag: str,\n+ tag: Optional[str],\n metric: Optional[str] = None,\n use_cuda: bool = False,\n ) -> Generator[_ReportNotification, None, None]:\n+ if tag is None:\n+ tag = _infer_tag_name(inspect.currentframe(), depth=2)\n+\n if metric is None:\n metric = tag\n \n@@ -30,12 +48,12 @@\n \n \n def record_function(\n- tag: str,\n+ tag: Optional[str],\n use_cuda: bool = False,\n ) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:\n def wrapped(*args: Any, **kwargs: Any) -> _T:\n- with record(tag, use_cuda=use_cuda):\n+ with record(tag or f.__name__, use_cuda=use_cuda):\n return f(*args, **kwargs)\n \n return wrapped\n@@ -44,11 +62,14 @@\n \n \n def record_iterable(\n- tag: str,\n- iter: Iterable[_T],\n- divide_metric: bool = False,\n- use_cuda: bool = False,\n+ tag: Optional[str],\n+ iter: Iterable[_T],\n+ divide_metric: bool = False,\n+ use_cuda: bool = False,\n ) -> Iterable[_T]:\n+ if tag is None:\n+ tag = _infer_tag_name(inspect.currentframe(), depth=1)\n+\n def wrapped() -> Iterable[_T]:\n for i, x in enumerate(iter):\n name = f\"{tag}-{i}\"\n", "issue": "Profiler: Automatically fill `tag` in `record`?\nMaybe we can use the caller's function name (`inspect.stack()`) if tag is not given.\r\n\r\nTODO: Need to measure overhead.\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom typing import Any, Callable, Generator, Iterable, Optional, TypeVar\n\nimport torch\n\nfrom pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification\n\n\n@contextmanager\ndef record(\n tag: str,\n metric: Optional[str] = None,\n use_cuda: bool = False,\n) -> Generator[_ReportNotification, None, None]:\n if metric is None:\n metric = tag\n\n if use_cuda:\n torch.cuda.nvtx.range_push(tag) # type: ignore[no-untyped-call]\n try:\n with torch.autograd.profiler.record_function(tag):\n with time_summary.report(metric, use_cuda) as ntf:\n yield ntf\n finally:\n if use_cuda:\n torch.cuda.nvtx.range_pop() # type: ignore[no-untyped-call]\n\n\n_T = TypeVar('_T')\n\n\ndef record_function(\n tag: str,\n use_cuda: bool = False,\n) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:\n def wrapped(*args: Any, **kwargs: Any) -> _T:\n with record(tag, use_cuda=use_cuda):\n return f(*args, **kwargs)\n\n return wrapped\n\n return wrapper\n\n\ndef record_iterable(\n tag: str,\n iter: Iterable[_T],\n divide_metric: bool = False,\n use_cuda: bool = False,\n) -> Iterable[_T]:\n def wrapped() -> Iterable[_T]:\n for i, x in enumerate(iter):\n name = f\"{tag}-{i}\"\n metric = name if divide_metric else 
tag\n with record(name, metric, use_cuda=use_cuda):\n yield x\n\n return wrapped()\n", "path": "pytorch_pfn_extras/profiler/_record.py"}], "after_files": [{"content": "from contextlib import contextmanager\nimport inspect\nfrom typing import Any, Callable, Generator, Iterable, Optional, TypeVar\nimport types\n\nimport torch\n\nfrom pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification\n\n\ndef _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:\n for _ in range(depth):\n assert frame is not None\n frame = frame.f_back\n assert frame is not None\n frame_info = inspect.getframeinfo(frame, context=0)\n return '{}:{}:{}'.format(\n inspect.getmodulename(frame_info.filename),\n frame_info.lineno,\n frame_info.function,\n )\n\n\n@contextmanager\ndef record(\n tag: Optional[str],\n metric: Optional[str] = None,\n use_cuda: bool = False,\n) -> Generator[_ReportNotification, None, None]:\n if tag is None:\n tag = _infer_tag_name(inspect.currentframe(), depth=2)\n\n if metric is None:\n metric = tag\n\n if use_cuda:\n torch.cuda.nvtx.range_push(tag) # type: ignore[no-untyped-call]\n try:\n with torch.autograd.profiler.record_function(tag):\n with time_summary.report(metric, use_cuda) as ntf:\n yield ntf\n finally:\n if use_cuda:\n torch.cuda.nvtx.range_pop() # type: ignore[no-untyped-call]\n\n\n_T = TypeVar('_T')\n\n\ndef record_function(\n tag: Optional[str],\n use_cuda: bool = False,\n) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:\n def wrapped(*args: Any, **kwargs: Any) -> _T:\n with record(tag or f.__name__, use_cuda=use_cuda):\n return f(*args, **kwargs)\n\n return wrapped\n\n return wrapper\n\n\ndef record_iterable(\n tag: Optional[str],\n iter: Iterable[_T],\n divide_metric: bool = False,\n use_cuda: bool = False,\n) -> Iterable[_T]:\n if tag is None:\n tag = _infer_tag_name(inspect.currentframe(), depth=1)\n\n def wrapped() -> Iterable[_T]:\n for i, x in enumerate(iter):\n name = f\"{tag}-{i}\"\n metric = name if divide_metric else tag\n with record(name, metric, use_cuda=use_cuda):\n yield x\n\n return wrapped()\n", "path": "pytorch_pfn_extras/profiler/_record.py"}]} | 810 | 585 |
gh_patches_debug_35093 | rasdani/github-patches | git_diff | hydroshare__hydroshare-4819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rewrite author order test
**Description of the bug**
This test fails occasionally. Rewrite it removing 2 assertions:
[hs_core/tests/api/native/test_reorder_authors_management_command.py#L180](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L180)
[hs_core/tests/api/native/test_reorder_authors_management_command.py#L152](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L152)
Also: rewrite this management command so that it takes a res ID as a param:
https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/management/commands/reorder_authors.py#L24
Steps to reproduce the bug:
http://ci.hydroshare.org:8080/job/hydroshare-pull-requests/5750/testReport/junit/hs_core.tests.api.native.test_reorder_authors_management_command/TestReorderAuthorsCommand/test_command_fixes_triplicate_authors/
**Expected behavior**
Test should not be dependent on django .get() order
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_core/management/commands/reorder_authors.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Fix duplicate author "order" values
5
6 Related to https://github.com/hydroshare/hydroshare/issues/4695
7 """
8
9 from django.core.management.base import BaseCommand
10 from hs_core.models import BaseResource
11 from hs_core.hydroshare.utils import set_dirty_bag_flag
12
13
14 class Command(BaseCommand):
15 help = "Fix duplicate author 'order' values"
16
17 def handle(self, *args, **options):
18 resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')
19 for res in resources:
20 if res.metadata is not None:
21 creators = res.metadata.creators.all()
22 is_dirty = False
23 for index, creator in enumerate(creators, start=1):
24 if creator.order != index:
25 print("*" * 100)
26 print(f"Author out of order.\nR:{res.short_id}"
27 f"\nExpected: {index}, got: {creator.order}")
28 creator.order = index
29 creator.save()
30 is_dirty = True
31 if is_dirty:
32 set_dirty_bag_flag(res)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_core/management/commands/reorder_authors.py b/hs_core/management/commands/reorder_authors.py
--- a/hs_core/management/commands/reorder_authors.py
+++ b/hs_core/management/commands/reorder_authors.py
@@ -6,7 +6,7 @@
Related to https://github.com/hydroshare/hydroshare/issues/4695
"""
-from django.core.management.base import BaseCommand
+from django.core.management.base import BaseCommand, CommandError
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import set_dirty_bag_flag
@@ -14,19 +14,30 @@
class Command(BaseCommand):
help = "Fix duplicate author 'order' values"
+ def add_arguments(self, parser):
+ # ID of a resource for which users should be re-ordered
+ parser.add_argument('--resource_id', type=str, help=('Required. The id (short_id) of'
+ ' the resource'))
+
def handle(self, *args, **options):
- resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')
- for res in resources:
- if res.metadata is not None:
- creators = res.metadata.creators.all()
- is_dirty = False
- for index, creator in enumerate(creators, start=1):
- if creator.order != index:
- print("*" * 100)
- print(f"Author out of order.\nR:{res.short_id}"
- f"\nExpected: {index}, got: {creator.order}")
- creator.order = index
- creator.save()
- is_dirty = True
- if is_dirty:
- set_dirty_bag_flag(res)
+ if not options['resource_id']:
+ raise CommandError('resource_id argument is required')
+ res_id = options['resource_id']
+ res = BaseResource.objects.filter(short_id=res_id).first()
+ if not res:
+ raise CommandError('No resource found for the provided resource_id')
+ if res.raccess.published:
+ raise CommandError(f"Resource id: {res_id} is already published--can't update author order.")
+ if res.metadata is not None:
+ creators = res.metadata.creators.all()
+ is_dirty = False
+ for index, creator in enumerate(creators, start=1):
+ if creator.order != index:
+ print("*" * 100)
+ print(f"Author out of order.\nR:{res.short_id}"
+ f"\nExpected: {index}, got: {creator.order}")
+ creator.order = index
+ creator.save()
+ is_dirty = True
+ if is_dirty:
+ set_dirty_bag_flag(res)
| {"golden_diff": "diff --git a/hs_core/management/commands/reorder_authors.py b/hs_core/management/commands/reorder_authors.py\n--- a/hs_core/management/commands/reorder_authors.py\n+++ b/hs_core/management/commands/reorder_authors.py\n@@ -6,7 +6,7 @@\n Related to https://github.com/hydroshare/hydroshare/issues/4695\n \"\"\"\n \n-from django.core.management.base import BaseCommand\n+from django.core.management.base import BaseCommand, CommandError\n from hs_core.models import BaseResource\n from hs_core.hydroshare.utils import set_dirty_bag_flag\n \n@@ -14,19 +14,30 @@\n class Command(BaseCommand):\n help = \"Fix duplicate author 'order' values\"\n \n+ def add_arguments(self, parser):\n+ # ID of a resource for which users should be re-ordered\n+ parser.add_argument('--resource_id', type=str, help=('Required. The id (short_id) of'\n+ ' the resource'))\n+\n def handle(self, *args, **options):\n- resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')\n- for res in resources:\n- if res.metadata is not None:\n- creators = res.metadata.creators.all()\n- is_dirty = False\n- for index, creator in enumerate(creators, start=1):\n- if creator.order != index:\n- print(\"*\" * 100)\n- print(f\"Author out of order.\\nR:{res.short_id}\"\n- f\"\\nExpected: {index}, got: {creator.order}\")\n- creator.order = index\n- creator.save()\n- is_dirty = True\n- if is_dirty:\n- set_dirty_bag_flag(res)\n+ if not options['resource_id']:\n+ raise CommandError('resource_id argument is required')\n+ res_id = options['resource_id']\n+ res = BaseResource.objects.filter(short_id=res_id).first()\n+ if not res:\n+ raise CommandError('No resource found for the provided resource_id')\n+ if res.raccess.published:\n+ raise CommandError(f\"Resource id: {res_id} is already published--can't update author order.\")\n+ if res.metadata is not None:\n+ creators = res.metadata.creators.all()\n+ is_dirty = False\n+ for index, creator in enumerate(creators, start=1):\n+ if creator.order != index:\n+ print(\"*\" * 100)\n+ print(f\"Author out of order.\\nR:{res.short_id}\"\n+ f\"\\nExpected: {index}, got: {creator.order}\")\n+ creator.order = index\n+ creator.save()\n+ is_dirty = True\n+ if is_dirty:\n+ set_dirty_bag_flag(res)\n", "issue": "rewrite author order test\n**Description of the bug**\r\nThis test fails occasionally. 
Rewrite it removing 2 assertions:\r\n[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[\u2026]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L180)\r\n\r\n[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[\u2026]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L152)\r\n\r\nAlso: rewrite this management command so that it takes a res ID as a param:\r\nhttps://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/management/commands/reorder_authors.py#L24\r\n\r\nSteps to reproduce the bug:\r\nhttp://ci.hydroshare.org:8080/job/hydroshare-pull-requests/5750/testReport/junit/hs_core.tests.api.native.test_reorder_authors_management_command/TestReorderAuthorsCommand/test_command_fixes_triplicate_authors/\r\n\r\n**Expected behavior**\r\nTest should not be dependent on django .get() order\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nFix duplicate author \"order\" values\n\nRelated to https://github.com/hydroshare/hydroshare/issues/4695\n\"\"\"\n\nfrom django.core.management.base import BaseCommand\nfrom hs_core.models import BaseResource\nfrom hs_core.hydroshare.utils import set_dirty_bag_flag\n\n\nclass Command(BaseCommand):\n help = \"Fix duplicate author 'order' values\"\n\n def handle(self, *args, **options):\n resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')\n for res in resources:\n if res.metadata is not None:\n creators = res.metadata.creators.all()\n is_dirty = False\n for index, creator in enumerate(creators, start=1):\n if creator.order != index:\n print(\"*\" * 100)\n print(f\"Author out of order.\\nR:{res.short_id}\"\n f\"\\nExpected: {index}, got: {creator.order}\")\n creator.order = index\n creator.save()\n is_dirty = True\n if is_dirty:\n set_dirty_bag_flag(res)\n", "path": "hs_core/management/commands/reorder_authors.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nFix duplicate author \"order\" values\n\nRelated to https://github.com/hydroshare/hydroshare/issues/4695\n\"\"\"\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom hs_core.models import BaseResource\nfrom hs_core.hydroshare.utils import set_dirty_bag_flag\n\n\nclass Command(BaseCommand):\n help = \"Fix duplicate author 'order' values\"\n\n def add_arguments(self, parser):\n # ID of a resource for which users should be re-ordered\n parser.add_argument('--resource_id', type=str, help=('Required. 
The id (short_id) of'\n ' the resource'))\n\n def handle(self, *args, **options):\n if not options['resource_id']:\n raise CommandError('resource_id argument is required')\n res_id = options['resource_id']\n res = BaseResource.objects.filter(short_id=res_id).first()\n if not res:\n raise CommandError('No resource found for the provided resource_id')\n if res.raccess.published:\n raise CommandError(f\"Resource id: {res_id} is already published--can't update author order.\")\n if res.metadata is not None:\n creators = res.metadata.creators.all()\n is_dirty = False\n for index, creator in enumerate(creators, start=1):\n if creator.order != index:\n print(\"*\" * 100)\n print(f\"Author out of order.\\nR:{res.short_id}\"\n f\"\\nExpected: {index}, got: {creator.order}\")\n creator.order = index\n creator.save()\n is_dirty = True\n if is_dirty:\n set_dirty_bag_flag(res)\n", "path": "hs_core/management/commands/reorder_authors.py"}]} | 899 | 618 |
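A minimal sketch of an order-independent check in the spirit of the rewritten test above — Django test conventions assumed; `resource` stands for a hypothetical `BaseResource` fixture and is not part of the patch itself:

```python
from django.core.management import call_command

def assert_authors_reordered(resource):
    # The patched command now requires --resource_id instead of scanning
    # every unpublished resource.
    call_command("reorder_authors", resource_id=resource.short_id)
    orders = sorted(c.order for c in resource.metadata.creators.all())
    # Comparing sorted orders keeps the assertion independent of the
    # queryset/.get() ordering that made the original test flaky.
    assert orders == list(range(1, len(orders) + 1))
```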
gh_patches_debug_3501 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-1068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mail controlpanel: doesn't keep password field
Saving the mail settings in the controlpanel doesn't keep the password field value, as it is obviously never shown in the ESMTP password field.
Steps to reproduce:
1. Fill in ESMTP username and ESMTP password. Save settings. They are correctly stored.
2. Apply save settings again. ESMTP password is incorrectly stored as None.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/controlpanel/browser/mail.py`
Content:
```
1 from Products.CMFCore.utils import getToolByName
2 from Products.CMFPlone import PloneMessageFactory as _
3 from Products.CMFPlone.interfaces.controlpanel import IMailSchema
4 from Products.MailHost.MailHost import MailHostError
5 from Products.statusmessages.interfaces import IStatusMessage
6 from logging import getLogger
7 from plone.app.registry.browser import controlpanel
8 from plone.registry.interfaces import IRegistry
9 from z3c.form import button
10 from zope.component import getUtility
11
12 import smtplib
13 import socket
14 import sys
15
16 log = getLogger('Plone')
17
18
19 class MailControlPanelForm(controlpanel.RegistryEditForm):
20
21 id = "MailControlPanel"
22 label = _(u"Mail Settings")
23 schema = IMailSchema
24 schema_prefix = "plone"
25
26 @button.buttonAndHandler(_('Save'), name=None)
27 def handleSave(self, action):
28 self.save()
29
30 @button.buttonAndHandler(_('Cancel'), name='cancel')
31 def handleCancel(self, action):
32 super(MailControlPanelForm, self).handleCancel(self, action)
33
34 def save(self):
35 data, errors = self.extractData()
36 if errors:
37 self.status = self.formErrorsMessage
38 return False
39 self.applyChanges(data)
40 return True
41
42 @button.buttonAndHandler(
43 _('label_smtp_test', default='Save and send test e-mail'),
44 name='test')
45 def handle_test_action(self, action):
46 # Save data first
47 if not self.save():
48 return
49 mailhost = getToolByName(self.context, 'MailHost')
50
51 registry = getUtility(IRegistry)
52 mail_settings = registry.forInterface(IMailSchema, prefix='plone')
53 fromaddr = mail_settings.email_from_address
54 fromname = mail_settings.email_from_name
55
56 message = ("Hi,\n\nThis is a test message sent from the Plone "
57 "'Mail settings' control panel. Your receipt of this "
58 "message (at the address specified in the Site 'From' "
59 "address field) indicates that your e-mail server is "
60 "working!\n\n"
61 "Have a nice day.\n\n"
62 "Love,\n\nPlone")
63 email_charset = mail_settings.email_charset
64 subject = "Test e-mail from Plone"
65
66 # Make the timeout incredibly short. This is enough time for most mail
67 # servers, wherever they may be in the world, to respond to the
68 # connection request. Make sure we save the current value
69 # and restore it afterward.
70 timeout = socket.getdefaulttimeout()
71 try:
72 socket.setdefaulttimeout(3)
73 try:
74 mailhost.send(message,
75 mto=fromaddr,
76 mfrom=fromaddr,
77 subject=subject,
78 charset=email_charset,
79 immediate=True)
80
81 except (socket.error, MailHostError, smtplib.SMTPException):
82 # Connection refused or timeout.
83 log.exception('Unable to send test e-mail.')
84 value = sys.exc_info()[1]
85 msg = _(u'Unable to send test e-mail ${error}.',
86 mapping={'error': unicode(value)})
87 IStatusMessage(self.request).addStatusMessage(
88 msg, type='error')
89 else:
90 IStatusMessage(self.request).addStatusMessage(
91 _(u'Success! Check your mailbox for the test message.'),
92 type='info')
93 finally:
94 # Restore timeout to default value
95 socket.setdefaulttimeout(timeout)
96
97
98 class MailControlPanel(controlpanel.ControlPanelFormWrapper):
99 form = MailControlPanelForm
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/controlpanel/browser/mail.py b/Products/CMFPlone/controlpanel/browser/mail.py
--- a/Products/CMFPlone/controlpanel/browser/mail.py
+++ b/Products/CMFPlone/controlpanel/browser/mail.py
@@ -36,6 +36,10 @@
if errors:
self.status = self.formErrorsMessage
return False
+ #keep password field
+ if (data.get('smtp_userid') is not None
+ and data.get('smtp_pass') is None):
+ del data['smtp_pass']
self.applyChanges(data)
return True
| {"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/mail.py b/Products/CMFPlone/controlpanel/browser/mail.py\n--- a/Products/CMFPlone/controlpanel/browser/mail.py\n+++ b/Products/CMFPlone/controlpanel/browser/mail.py\n@@ -36,6 +36,10 @@\n if errors:\n self.status = self.formErrorsMessage\n return False\n+ #keep password field\n+ if (data.get('smtp_userid') is not None\n+ and data.get('smtp_pass') is None):\n+ del data['smtp_pass']\n self.applyChanges(data)\n return True\n", "issue": "mail controlpanel: doesn't keep password field\nSaving the mail settings in the controlpanel doesn't keep the password field value, as it is obviously never shown in ESMTP password.\n\nSteps to reproduce:\n1. Fill in ESMTP username and ESMTP password. Save settings. They are correctly stored.\n2. Apply save settings again. ESMTP password is incorrectly stored as None.\n\n", "before_files": [{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.MailHost.MailHost import MailHostError\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom logging import getLogger\nfrom plone.app.registry.browser import controlpanel\nfrom plone.registry.interfaces import IRegistry\nfrom z3c.form import button\nfrom zope.component import getUtility\n\nimport smtplib\nimport socket\nimport sys\n\nlog = getLogger('Plone')\n\n\nclass MailControlPanelForm(controlpanel.RegistryEditForm):\n\n id = \"MailControlPanel\"\n label = _(u\"Mail Settings\")\n schema = IMailSchema\n schema_prefix = \"plone\"\n\n @button.buttonAndHandler(_('Save'), name=None)\n def handleSave(self, action):\n self.save()\n\n @button.buttonAndHandler(_('Cancel'), name='cancel')\n def handleCancel(self, action):\n super(MailControlPanelForm, self).handleCancel(self, action)\n\n def save(self):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return False\n self.applyChanges(data)\n return True\n\n @button.buttonAndHandler(\n _('label_smtp_test', default='Save and send test e-mail'),\n name='test')\n def handle_test_action(self, action):\n # Save data first\n if not self.save():\n return\n mailhost = getToolByName(self.context, 'MailHost')\n\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n fromaddr = mail_settings.email_from_address\n fromname = mail_settings.email_from_name\n\n message = (\"Hi,\\n\\nThis is a test message sent from the Plone \"\n \"'Mail settings' control panel. Your receipt of this \"\n \"message (at the address specified in the Site 'From' \"\n \"address field) indicates that your e-mail server is \"\n \"working!\\n\\n\"\n \"Have a nice day.\\n\\n\"\n \"Love,\\n\\nPlone\")\n email_charset = mail_settings.email_charset\n subject = \"Test e-mail from Plone\"\n\n # Make the timeout incredibly short. This is enough time for most mail\n # servers, wherever they may be in the world, to respond to the\n # connection request. 
Make sure we save the current value\n # and restore it afterward.\n timeout = socket.getdefaulttimeout()\n try:\n socket.setdefaulttimeout(3)\n try:\n mailhost.send(message,\n mto=fromaddr,\n mfrom=fromaddr,\n subject=subject,\n charset=email_charset,\n immediate=True)\n\n except (socket.error, MailHostError, smtplib.SMTPException):\n # Connection refused or timeout.\n log.exception('Unable to send test e-mail.')\n value = sys.exc_info()[1]\n msg = _(u'Unable to send test e-mail ${error}.',\n mapping={'error': unicode(value)})\n IStatusMessage(self.request).addStatusMessage(\n msg, type='error')\n else:\n IStatusMessage(self.request).addStatusMessage(\n _(u'Success! Check your mailbox for the test message.'),\n type='info')\n finally:\n # Restore timeout to default value\n socket.setdefaulttimeout(timeout)\n\n\nclass MailControlPanel(controlpanel.ControlPanelFormWrapper):\n form = MailControlPanelForm\n", "path": "Products/CMFPlone/controlpanel/browser/mail.py"}], "after_files": [{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.MailHost.MailHost import MailHostError\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom logging import getLogger\nfrom plone.app.registry.browser import controlpanel\nfrom plone.registry.interfaces import IRegistry\nfrom z3c.form import button\nfrom zope.component import getUtility\n\nimport smtplib\nimport socket\nimport sys\n\nlog = getLogger('Plone')\n\n\nclass MailControlPanelForm(controlpanel.RegistryEditForm):\n\n id = \"MailControlPanel\"\n label = _(u\"Mail Settings\")\n schema = IMailSchema\n schema_prefix = \"plone\"\n\n @button.buttonAndHandler(_('Save'), name=None)\n def handleSave(self, action):\n self.save()\n\n @button.buttonAndHandler(_('Cancel'), name='cancel')\n def handleCancel(self, action):\n super(MailControlPanelForm, self).handleCancel(self, action)\n\n def save(self):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return False\n #keep password field\n if (data.get('smtp_userid') is not None\n and data.get('smtp_pass') is None):\n del data['smtp_pass']\n self.applyChanges(data)\n return True\n\n @button.buttonAndHandler(\n _('label_smtp_test', default='Save and send test e-mail'),\n name='test')\n def handle_test_action(self, action):\n # Save data first\n if not self.save():\n return\n mailhost = getToolByName(self.context, 'MailHost')\n\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n fromaddr = mail_settings.email_from_address\n fromname = mail_settings.email_from_name\n\n message = (\"Hi,\\n\\nThis is a test message sent from the Plone \"\n \"'Mail settings' control panel. Your receipt of this \"\n \"message (at the address specified in the Site 'From' \"\n \"address field) indicates that your e-mail server is \"\n \"working!\\n\\n\"\n \"Have a nice day.\\n\\n\"\n \"Love,\\n\\nPlone\")\n email_charset = mail_settings.email_charset\n subject = \"Test e-mail from Plone\"\n\n # Make the timeout incredibly short. This is enough time for most mail\n # servers, wherever they may be in the world, to respond to the\n # connection request. 
Make sure we save the current value\n # and restore it afterward.\n timeout = socket.getdefaulttimeout()\n try:\n socket.setdefaulttimeout(3)\n try:\n mailhost.send(message,\n mto=fromaddr,\n mfrom=fromaddr,\n subject=subject,\n charset=email_charset,\n immediate=True)\n\n except (socket.error, MailHostError, smtplib.SMTPException):\n # Connection refused or timeout.\n log.exception('Unable to send test e-mail.')\n value = sys.exc_info()[1]\n msg = _(u'Unable to send test e-mail ${error}.',\n mapping={'error': unicode(value)})\n IStatusMessage(self.request).addStatusMessage(\n msg, type='error')\n else:\n IStatusMessage(self.request).addStatusMessage(\n _(u'Success! Check your mailbox for the test message.'),\n type='info')\n finally:\n # Restore timeout to default value\n socket.setdefaulttimeout(timeout)\n\n\nclass MailControlPanel(controlpanel.ControlPanelFormWrapper):\n form = MailControlPanelForm\n", "path": "Products/CMFPlone/controlpanel/browser/mail.py"}]} | 1,288 | 141 |
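The keep-password guard from the patch above, isolated as a plain function for illustration — `scrub_form_data` is a made-up name, not part of Plone's API:

```python
def scrub_form_data(data):
    """Drop an empty password key so applyChanges() keeps the stored value."""
    if data.get('smtp_userid') is not None and data.get('smtp_pass') is None:
        data = dict(data)            # avoid mutating the caller's mapping
        data.pop('smtp_pass', None)  # tolerate a missing key as well
    return data

# An empty password next to a userid is dropped; everything else passes through.
assert 'smtp_pass' not in scrub_form_data({'smtp_userid': 'u', 'smtp_pass': None})
assert scrub_form_data({'smtp_userid': 'u', 'smtp_pass': 's3cret'})['smtp_pass'] == 's3cret'
```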
gh_patches_debug_32239 | rasdani/github-patches | git_diff | Textualize__textual-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`)` cannot appear as part of the parameter passed to an action
Reproduction:
```python
from textual.app import App
class ActionBugApp(App):
BINDINGS = [("a", "test(')')", "Test")]
def action_test(self, _: str) -> None:
pass
if __name__ == '__main__':
app = ActionBugApp()
app.run()
```
Omitting the full stack trace (since it's fairly easy to reproduce), the key error message is:
```
ActionError: unable to parse "(')" in action "test(')')"
```
Seems that [this regex](https://github.com/Textualize/textual/blob/2a6368754a8b3a11f1772b52298b5d3b50ceebaa/src/textual/actions.py#L20) is not general enough.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/actions.py`
Content:
```
1 from __future__ import annotations
2
3 import ast
4 import re
5
6 from typing_extensions import Any, TypeAlias
7
8 ActionParseResult: TypeAlias = "tuple[str, tuple[Any, ...]]"
9 """An action is its name and the arbitrary tuple of its parameters."""
10
11
12 class SkipAction(Exception):
13 """Raise in an action to skip the action (and allow any parent bindings to run)."""
14
15
16 class ActionError(Exception):
17 pass
18
19
20 re_action_params = re.compile(r"([\w\.]+)(\(.*?\))")
21
22
23 def parse(action: str) -> ActionParseResult:
24 """Parses an action string.
25
26 Args:
27 action: String containing action.
28
29 Raises:
30 ActionError: If the action has invalid syntax.
31
32 Returns:
33 Action name and parameters
34 """
35 params_match = re_action_params.match(action)
36 if params_match is not None:
37 action_name, action_params_str = params_match.groups()
38 try:
39 action_params = ast.literal_eval(action_params_str)
40 except Exception:
41 raise ActionError(
42 f"unable to parse {action_params_str!r} in action {action!r}"
43 )
44 else:
45 action_name = action
46 action_params = ()
47
48 return (
49 action_name,
50 action_params if isinstance(action_params, tuple) else (action_params,),
51 )
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/actions.py b/src/textual/actions.py
--- a/src/textual/actions.py
+++ b/src/textual/actions.py
@@ -6,7 +6,7 @@
from typing_extensions import Any, TypeAlias
ActionParseResult: TypeAlias = "tuple[str, tuple[Any, ...]]"
-"""An action is its name and the arbitrary tuple of its parameters."""
+"""An action is its name and the arbitrary tuple of its arguments."""
class SkipAction(Exception):
@@ -17,7 +17,7 @@
pass
-re_action_params = re.compile(r"([\w\.]+)(\(.*?\))")
+re_action_args = re.compile(r"([\w\.]+)\((.*)\)")
def parse(action: str) -> ActionParseResult:
@@ -30,22 +30,25 @@
ActionError: If the action has invalid syntax.
Returns:
- Action name and parameters
+ Action name and arguments.
"""
- params_match = re_action_params.match(action)
- if params_match is not None:
- action_name, action_params_str = params_match.groups()
- try:
- action_params = ast.literal_eval(action_params_str)
- except Exception:
- raise ActionError(
- f"unable to parse {action_params_str!r} in action {action!r}"
- )
+ args_match = re_action_args.match(action)
+ if args_match is not None:
+ action_name, action_args_str = args_match.groups()
+ if action_args_str:
+ try:
+ # We wrap `action_args_str` to be able to disambiguate the cases where
+ # the list of arguments is a comma-separated list of values from the
+ # case where the argument is a single tuple.
+ action_args: tuple[Any, ...] = ast.literal_eval(f"({action_args_str},)")
+ except Exception:
+ raise ActionError(
+ f"unable to parse {action_args_str!r} in action {action!r}"
+ )
+ else:
+ action_args = ()
else:
action_name = action
- action_params = ()
+ action_args = ()
- return (
- action_name,
- action_params if isinstance(action_params, tuple) else (action_params,),
- )
+ return action_name, action_args
| {"golden_diff": "diff --git a/src/textual/actions.py b/src/textual/actions.py\n--- a/src/textual/actions.py\n+++ b/src/textual/actions.py\n@@ -6,7 +6,7 @@\n from typing_extensions import Any, TypeAlias\n \n ActionParseResult: TypeAlias = \"tuple[str, tuple[Any, ...]]\"\n-\"\"\"An action is its name and the arbitrary tuple of its parameters.\"\"\"\n+\"\"\"An action is its name and the arbitrary tuple of its arguments.\"\"\"\n \n \n class SkipAction(Exception):\n@@ -17,7 +17,7 @@\n pass\n \n \n-re_action_params = re.compile(r\"([\\w\\.]+)(\\(.*?\\))\")\n+re_action_args = re.compile(r\"([\\w\\.]+)\\((.*)\\)\")\n \n \n def parse(action: str) -> ActionParseResult:\n@@ -30,22 +30,25 @@\n ActionError: If the action has invalid syntax.\n \n Returns:\n- Action name and parameters\n+ Action name and arguments.\n \"\"\"\n- params_match = re_action_params.match(action)\n- if params_match is not None:\n- action_name, action_params_str = params_match.groups()\n- try:\n- action_params = ast.literal_eval(action_params_str)\n- except Exception:\n- raise ActionError(\n- f\"unable to parse {action_params_str!r} in action {action!r}\"\n- )\n+ args_match = re_action_args.match(action)\n+ if args_match is not None:\n+ action_name, action_args_str = args_match.groups()\n+ if action_args_str:\n+ try:\n+ # We wrap `action_args_str` to be able to disambiguate the cases where\n+ # the list of arguments is a comma-separated list of values from the\n+ # case where the argument is a single tuple.\n+ action_args: tuple[Any, ...] = ast.literal_eval(f\"({action_args_str},)\")\n+ except Exception:\n+ raise ActionError(\n+ f\"unable to parse {action_args_str!r} in action {action!r}\"\n+ )\n+ else:\n+ action_args = ()\n else:\n action_name = action\n- action_params = ()\n+ action_args = ()\n \n- return (\n- action_name,\n- action_params if isinstance(action_params, tuple) else (action_params,),\n- )\n+ return action_name, action_args\n", "issue": "`)` cannot appear as part of the parameter passed to an action\nReproduction:\r\n\r\n```python\r\nfrom textual.app import App\r\n\r\n\r\nclass ActionBugApp(App):\r\n BINDINGS = [(\"a\", \"test(')')\", \"Test\")]\r\n\r\n def action_test(self, _: str) -> None:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n app = ActionBugApp()\r\n app.run()\r\n```\r\n\r\nOmitting the full stack trace (since it's fairly easy to reproduce), the key error message is:\r\n\r\n```\r\nActionError: unable to parse \"(')\" in action \"test(')')\"\r\n```\r\n\r\nSeems that [this regex](https://github.com/Textualize/textual/blob/2a6368754a8b3a11f1772b52298b5d3b50ceebaa/src/textual/actions.py#L20) is not general enough.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport ast\nimport re\n\nfrom typing_extensions import Any, TypeAlias\n\nActionParseResult: TypeAlias = \"tuple[str, tuple[Any, ...]]\"\n\"\"\"An action is its name and the arbitrary tuple of its parameters.\"\"\"\n\n\nclass SkipAction(Exception):\n \"\"\"Raise in an action to skip the action (and allow any parent bindings to run).\"\"\"\n\n\nclass ActionError(Exception):\n pass\n\n\nre_action_params = re.compile(r\"([\\w\\.]+)(\\(.*?\\))\")\n\n\ndef parse(action: str) -> ActionParseResult:\n \"\"\"Parses an action string.\n\n Args:\n action: String containing action.\n\n Raises:\n ActionError: If the action has invalid syntax.\n\n Returns:\n Action name and parameters\n \"\"\"\n params_match = re_action_params.match(action)\n if params_match is not None:\n action_name, action_params_str = params_match.groups()\n try:\n 
action_params = ast.literal_eval(action_params_str)\n except Exception:\n raise ActionError(\n f\"unable to parse {action_params_str!r} in action {action!r}\"\n )\n else:\n action_name = action\n action_params = ()\n\n return (\n action_name,\n action_params if isinstance(action_params, tuple) else (action_params,),\n )\n", "path": "src/textual/actions.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport ast\nimport re\n\nfrom typing_extensions import Any, TypeAlias\n\nActionParseResult: TypeAlias = \"tuple[str, tuple[Any, ...]]\"\n\"\"\"An action is its name and the arbitrary tuple of its arguments.\"\"\"\n\n\nclass SkipAction(Exception):\n \"\"\"Raise in an action to skip the action (and allow any parent bindings to run).\"\"\"\n\n\nclass ActionError(Exception):\n pass\n\n\nre_action_args = re.compile(r\"([\\w\\.]+)\\((.*)\\)\")\n\n\ndef parse(action: str) -> ActionParseResult:\n \"\"\"Parses an action string.\n\n Args:\n action: String containing action.\n\n Raises:\n ActionError: If the action has invalid syntax.\n\n Returns:\n Action name and arguments.\n \"\"\"\n args_match = re_action_args.match(action)\n if args_match is not None:\n action_name, action_args_str = args_match.groups()\n if action_args_str:\n try:\n # We wrap `action_args_str` to be able to disambiguate the cases where\n # the list of arguments is a comma-separated list of values from the\n # case where the argument is a single tuple.\n action_args: tuple[Any, ...] = ast.literal_eval(f\"({action_args_str},)\")\n except Exception:\n raise ActionError(\n f\"unable to parse {action_args_str!r} in action {action!r}\"\n )\n else:\n action_args = ()\n else:\n action_name = action\n action_args = ()\n\n return action_name, action_args\n", "path": "src/textual/actions.py"}]} | 839 | 518 |
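The disambiguation trick from the patch above, shown standalone (standard library only; `parse_action_args` is an illustrative helper name):

```python
import ast

def parse_action_args(action_args_str: str) -> tuple:
    # Wrapping the raw argument string in "(...,)" always yields a tuple,
    # so a lone ")" literal parses cleanly and a single tuple argument is
    # still distinguishable from a comma-separated argument list.
    return ast.literal_eval(f"({action_args_str},)")

assert parse_action_args("')'") == (")",)        # the case from the issue
assert parse_action_args("1, 'b'") == (1, "b")   # two arguments
assert parse_action_args("(1, 2)") == ((1, 2),)  # one tuple argument
```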
gh_patches_debug_7276 | rasdani/github-patches | git_diff | pyodide__pyodide-3013 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Relative URLs in pyodide.loadPackage
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
The documentation states that [pyodide.loadPackage](https://pyodide.org/en/stable/usage/api/js-api.html#pyodide.loadPackage) supports relative URLs. I'm trying to load an out-of-tree wheel from my local webserver, but this doesn't seem to work out well.
### To Reproduce
<!-- Minimal code example to reproduce the bug. -->
```js
await pyodide.loadPackage("dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl");
```
or
```js
await pyodide.loadPackage("./dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl");
```
Pyodide tries to load the wheel from `https://cdn.jsdelivr.net/pyodide/v0.21.1/full/dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl`.
### Expected behavior
<!-- FILL IN -->
Load the wheel from the relative URL.
### Environment
- Pyodide Version<!-- (e.g. 1.8.1) -->: 0.21.1
- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: Firefox ESR 91.12.0, Chromium 104.0.5112.101
- Any other relevant information:
<!-- If you are building Pyodide by yourself, please also include these information: -->
<!--
- Commit hash of Pyodide git repository:
- Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:
-->
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/micropip/src/micropip/_compat_in_pyodide.py`
Content:
```
1 from io import BytesIO
2 from typing import IO
3 from urllib.parse import urlparse
4
5 from pyodide._core import IN_BROWSER
6 from pyodide.http import pyfetch
7
8 try:
9 import pyodide_js
10 from pyodide_js import loadedPackages, loadPackage
11 from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]
12
13 REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()
14 REPODATA_INFO = pyodide_js._api.repodata_info.to_py()
15 except ImportError:
16 if IN_BROWSER:
17 raise
18 # Otherwise, this is pytest test collection so let it go.
19
20
21 async def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:
22 parsed_url = urlparse(url)
23 if parsed_url.scheme == "emfs":
24 return open(parsed_url.path, "rb")
25 if parsed_url.scheme == "file":
26 result_bytes = (await loadBinaryFile("", parsed_url.path)).to_bytes()
27 else:
28 result_bytes = await (await pyfetch(url, **kwargs)).bytes()
29 return BytesIO(result_bytes)
30
31
32 async def fetch_string(url: str, kwargs: dict[str, str]) -> str:
33 return await (await pyfetch(url, **kwargs)).string()
34
35
36 __all__ = [
37 "fetch_bytes",
38 "fetch_string",
39 "REPODATA_INFO",
40 "REPODATA_PACKAGES",
41 "loadedPackages",
42 "loadDynlib",
43 "loadPackage",
44 ]
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/micropip/src/micropip/_compat_in_pyodide.py b/packages/micropip/src/micropip/_compat_in_pyodide.py
--- a/packages/micropip/src/micropip/_compat_in_pyodide.py
+++ b/packages/micropip/src/micropip/_compat_in_pyodide.py
@@ -23,7 +23,7 @@
if parsed_url.scheme == "emfs":
return open(parsed_url.path, "rb")
if parsed_url.scheme == "file":
- result_bytes = (await loadBinaryFile("", parsed_url.path)).to_bytes()
+ result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()
else:
result_bytes = await (await pyfetch(url, **kwargs)).bytes()
return BytesIO(result_bytes)
| {"golden_diff": "diff --git a/packages/micropip/src/micropip/_compat_in_pyodide.py b/packages/micropip/src/micropip/_compat_in_pyodide.py\n--- a/packages/micropip/src/micropip/_compat_in_pyodide.py\n+++ b/packages/micropip/src/micropip/_compat_in_pyodide.py\n@@ -23,7 +23,7 @@\n if parsed_url.scheme == \"emfs\":\n return open(parsed_url.path, \"rb\")\n if parsed_url.scheme == \"file\":\n- result_bytes = (await loadBinaryFile(\"\", parsed_url.path)).to_bytes()\n+ result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()\n else:\n result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n return BytesIO(result_bytes)\n", "issue": "Relative URLs in pyodide.loadPackage\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe documentation states that [pyodide.loadPackage](https://pyodide.org/en/stable/usage/api/js-api.html#pyodide.loadPackage) supports relative URLs. I'm trying to load an out-of-tree wheel from my local webserver, but this doesn't seem to work out well.\r\n\r\n### To Reproduce\r\n\r\n<!-- Minimal code example to reproduce the bug. -->\r\n```js\r\nawait pyodide.loadPackage(\"dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl\");\r\n```\r\nor\r\n```js\r\nawait pyodide.loadPackage(\"./dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl\");\r\n```\r\nPyodide tries to load the wheel from `https://cdn.jsdelivr.net/pyodide/v0.21.1/full/dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl`.\r\n\r\n### Expected behavior\r\n\r\n<!-- FILL IN -->\r\nLoad the wheel from the relative URL.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->: 0.21.1\r\n- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: Firefox ESR 91.12.0, Chromium 104.0.5112.101\r\n- Any other relevant information:\r\n\r\n<!-- If you are building Pyodide by yourself, please also include these information: -->\r\n\r\n<!--\r\n- Commit hash of Pyodide git repository:\r\n- Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:\r\n-->\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "from io import BytesIO\nfrom typing import IO\nfrom urllib.parse import urlparse\n\nfrom pyodide._core import IN_BROWSER\nfrom pyodide.http import pyfetch\n\ntry:\n import pyodide_js\n from pyodide_js import loadedPackages, loadPackage\n from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]\n\n REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()\n REPODATA_INFO = pyodide_js._api.repodata_info.to_py()\nexcept ImportError:\n if IN_BROWSER:\n raise\n # Otherwise, this is pytest test collection so let it go.\n\n\nasync def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:\n parsed_url = urlparse(url)\n if parsed_url.scheme == \"emfs\":\n return open(parsed_url.path, \"rb\")\n if parsed_url.scheme == \"file\":\n result_bytes = (await loadBinaryFile(\"\", parsed_url.path)).to_bytes()\n else:\n result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n return BytesIO(result_bytes)\n\n\nasync def fetch_string(url: str, kwargs: dict[str, str]) -> str:\n return await (await pyfetch(url, **kwargs)).string()\n\n\n__all__ = [\n \"fetch_bytes\",\n \"fetch_string\",\n \"REPODATA_INFO\",\n \"REPODATA_PACKAGES\",\n \"loadedPackages\",\n \"loadDynlib\",\n \"loadPackage\",\n]\n", "path": "packages/micropip/src/micropip/_compat_in_pyodide.py"}], "after_files": [{"content": "from io import BytesIO\nfrom typing import IO\nfrom urllib.parse import urlparse\n\nfrom pyodide._core import IN_BROWSER\nfrom pyodide.http import pyfetch\n\ntry:\n import pyodide_js\n from pyodide_js import loadedPackages, loadPackage\n from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]\n\n REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()\n REPODATA_INFO = pyodide_js._api.repodata_info.to_py()\nexcept ImportError:\n if IN_BROWSER:\n raise\n # Otherwise, this is pytest test collection so let it go.\n\n\nasync def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:\n parsed_url = urlparse(url)\n if parsed_url.scheme == \"emfs\":\n return open(parsed_url.path, \"rb\")\n if parsed_url.scheme == \"file\":\n result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()\n else:\n result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n return BytesIO(result_bytes)\n\n\nasync def fetch_string(url: str, kwargs: dict[str, str]) -> str:\n return await (await pyfetch(url, **kwargs)).string()\n\n\n__all__ = [\n \"fetch_bytes\",\n \"fetch_string\",\n \"REPODATA_INFO\",\n \"REPODATA_PACKAGES\",\n \"loadedPackages\",\n \"loadDynlib\",\n \"loadPackage\",\n]\n", "path": "packages/micropip/src/micropip/_compat_in_pyodide.py"}]} | 1,141 | 182 |
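A sketch of the corrected `file://` branch from the patch above — it only runs inside Pyodide, and it assumes `loadBinaryFile` (from `pyodide_js._api`) and `pyfetch` (from `pyodide.http`) are imported as in the file shown earlier:

```python
from io import BytesIO
from urllib.parse import urlparse

async def fetch_wheel_bytes(url: str) -> BytesIO:
    parsed = urlparse(url)
    if parsed.scheme == "file":
        # Per the diff, the leading empty-string argument is dropped and
        # loadBinaryFile() now receives the path directly.
        return BytesIO((await loadBinaryFile(parsed.path)).to_bytes())
    return BytesIO(await (await pyfetch(url)).bytes())
```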
gh_patches_debug_35885 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[master]Use tf.ResourceVariable to store model
Currently we store the model as a <string, ndarray> map. When using tf.optimizer.apply_gradient() to update the model, we need to convert the map to ResourceVariable and back. It is better to change the model to a <string, ResourceVariable> map to avoid copying and conversion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/master/servicer.py`
Content:
```
1 import threading
2
3 from proto import master_pb2
4 from proto import master_pb2_grpc
5 from util.converter import NdarrayToTensor, TensorToNdarray
6
7
8 class MasterServicer(master_pb2_grpc.MasterServicer):
9 """Master service implementation"""
10
11 def __init__(self, logger, grads_to_wait):
12 self.logger = logger
13 self._lock = threading.Lock()
14 # TODO: random initialization
15 self._model = {}
16 self._version = 0
17 self._gradient_sum = {}
18 self._grad_to_wait = grads_to_wait
19 self._grad_n = 0
20
21 def GetTask(self, request, context):
22         # TODO: implement task queues. Return an empty task for now.
23 res = master_pb2.Task()
24 res.shard_file_name = ""
25 res.model_version = self._version
26 return res
27
28 def GetModel(self, request, context):
29 if request.min_version > self._version:
30 err_msg = (
31 "Requested version %d not available yet, current version: %d"
32 % (request.min_version, self._version)
33 )
34 self.logger.warning(err_msg)
35 raise ValueError(err_msg)
36
37 res = master_pb2.Model()
38 with self._lock:
39 res.version = self._version
40 for k, v in self._model.items():
41 res.param[k].CopyFrom(NdarrayToTensor(v))
42 return res
43
44 def ReportTaskResult(self, request, context):
45 if request.model_version > self._version:
46 err_msg = "Model version %d out of range, current version: %d" % (
47 request.model_version,
48 self._version,
49 )
50 self.logger.warning(err_msg)
51 raise ValueError(err_msg)
52
53 res = master_pb2.ReportTaskResultReply()
54 if request.model_version < self._version:
55 self.logger.warning(
56 "Task result for outdated version %d dropped",
57 request.model_version,
58 )
59 res.accepted = False
60 res.model_version = self._version
61 return res
62
63 if request.err_message:
64 self.logger.warning("Worker error: %s" % request.err_message)
65 res.accepted = False
66 res.model_version = self._version
67 return res
68
69 # TODO: Update task queue with task_id
70 with self._lock:
71 tmp = {}
72 # Do sanity check before accumulating gradients.
73 for k, v in request.gradient.items():
74 if k not in self._model:
75 raise ValueError(
76 "Gradient key: %s is not part of model", k
77 )
78 arr = TensorToNdarray(v)
79 if arr.shape != self._model[k].shape:
80 raise ValueError(
81 "Gradient key: %s has incompatible dimension", k
82 )
83 tmp[k] = arr
84
85 for k, v in tmp.items():
86 if k in self._gradient_sum:
87 self._gradient_sum[k] = self._gradient_sum[k] + v
88 else:
89 self._gradient_sum[k] = v
90
91 self._grad_n += 1
92 if self._grad_n >= self._grad_to_wait:
93 # TODO: update model
94 self._version += 1
95 self._gradient_sum.clear()
96 self._grad_n = 0
97 res.accepted = True
98 res.model_version = self._version
99 return res
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py
--- a/elasticdl/master/servicer.py
+++ b/elasticdl/master/servicer.py
@@ -1,5 +1,7 @@
import threading
+import numpy as np
+import tensorflow as tf
from proto import master_pb2
from proto import master_pb2_grpc
from util.converter import NdarrayToTensor, TensorToNdarray
@@ -12,12 +14,21 @@
self.logger = logger
self._lock = threading.Lock()
# TODO: random initialization
+ # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable
+ # instead ndarray to avoid copying and conversion when calling
+ # optimizer's apply_gradients() function.
self._model = {}
self._version = 0
self._gradient_sum = {}
self._grad_to_wait = grads_to_wait
self._grad_n = 0
+ def _set_model_var(self, name, value):
+ """Add or set model variable. Value should be a float32 ndarray"""
+ if value.dtype != np.float32:
+ raise ValueError("Value should be a float32 numpy array")
+ self._model[name] = tf.Variable(value, name=name, use_resource=True)
+
def GetTask(self, request, context):
# TODO: implent task queues. Return an empty task for now.
res = master_pb2.Task()
@@ -38,7 +49,7 @@
with self._lock:
res.version = self._version
for k, v in self._model.items():
- res.param[k].CopyFrom(NdarrayToTensor(v))
+ res.param[k].CopyFrom(NdarrayToTensor(v.numpy()))
return res
def ReportTaskResult(self, request, context):
@@ -76,7 +87,7 @@
"Gradient key: %s is not part of model", k
)
arr = TensorToNdarray(v)
- if arr.shape != self._model[k].shape:
+ if arr.shape != self._model[k].numpy().shape:
raise ValueError(
"Gradient key: %s has incompatible dimension", k
)
| {"golden_diff": "diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py\n--- a/elasticdl/master/servicer.py\n+++ b/elasticdl/master/servicer.py\n@@ -1,5 +1,7 @@\n import threading\n+import numpy as np\n \n+import tensorflow as tf\n from proto import master_pb2\n from proto import master_pb2_grpc\n from util.converter import NdarrayToTensor, TensorToNdarray\n@@ -12,12 +14,21 @@\n self.logger = logger\n self._lock = threading.Lock()\n # TODO: random initialization\n+ # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n+ # instead ndarray to avoid copying and conversion when calling\n+ # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n \n+ def _set_model_var(self, name, value):\n+ \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n+ if value.dtype != np.float32:\n+ raise ValueError(\"Value should be a float32 numpy array\")\n+ self._model[name] = tf.Variable(value, name=name, use_resource=True)\n+\n def GetTask(self, request, context):\n # TODO: implent task queues. Return an empty task for now.\n res = master_pb2.Task()\n@@ -38,7 +49,7 @@\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n- res.param[k].CopyFrom(NdarrayToTensor(v))\n+ res.param[k].CopyFrom(NdarrayToTensor(v.numpy()))\n return res\n \n def ReportTaskResult(self, request, context):\n@@ -76,7 +87,7 @@\n \"Gradient key: %s is not part of model\", k\n )\n arr = TensorToNdarray(v)\n- if arr.shape != self._model[k].shape:\n+ if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n", "issue": "[master]Use tf.ResourceVariable to store model\nCurrently we store model as a <string, ndarray> map. when using tf.optimizer.apply_gradient() to update model, we need to convert the map to ResourceVariable and back. It is better to change model to a <string, ResourceVariable> map to avoid copy and conversion.\n", "before_files": [{"content": "import threading\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.converter import NdarrayToTensor, TensorToNdarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait):\n self.logger = logger\n self._lock = threading.Lock()\n # TODO: random initialization\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(NdarrayToTensor(v))\n return res\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = TensorToNdarray(v)\n if arr.shape != self._model[k].shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n # TODO: update model\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}], "after_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.converter import NdarrayToTensor, TensorToNdarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait):\n self.logger = logger\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name, use_resource=True)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(NdarrayToTensor(v.numpy()))\n return res\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = TensorToNdarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n # TODO: update model\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}]} | 1,245 | 496 |
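A minimal sketch of why storing parameters as `tf.Variable` (resource variables) avoids the ndarray round trip described in the issue — assumes TensorFlow 2.x in eager mode:

```python
import numpy as np
import tensorflow as tf

w = tf.Variable(np.zeros((2, 2), dtype=np.float32), name="w")
grad = tf.ones_like(w)
# apply_gradients() mutates the variable in place; no map-to-variable
# conversion and no copy back into an ndarray are needed.
tf.keras.optimizers.SGD(learning_rate=0.1).apply_gradients([(grad, w)])
print(w.numpy())  # every entry is now -0.1
```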
gh_patches_debug_189 | rasdani/github-patches | git_diff | qtile__qtile-1837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.16.0: impossible to build from github sources (to run tests)
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
Hi! I package qtile for Arch Linux. I'm currently trying to build 0.16.0.
Usually I also run the test suite against the release (although there are still problems: #1352 and #1130) to be able to at least ensure some kind of compatibility with the Arch Linux provided python3 ecosystem.
However, running tests is only possible with the github source tarballs (because the test files are included), which unfortunately is not the case for the pypi tarballs.
When running `python setup.py build` for 0.16.0 I am now getting this:
```
Traceback (most recent call last):
File "setup.py", line 91, in <module>
setup(
File "/usr/lib/python3.8/site-packages/setuptools/__init__.py", line 165, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.8/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 429, in __init__
_Distribution.__init__(self, {
File "/usr/lib/python3.8/distutils/dist.py", line 292, in __init__
self.finalize_options()
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 721, in finalize_options
ep(self)
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 728, in _finalize_setup_keywords
ep.load()(self, ep.name, value)
File "/usr/lib/python3.8/site-packages/setuptools_scm/integration.py", line 17, in version_keyword
dist.metadata.version = _get_version(config)
File "/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py", line 148, in _get_version
parsed_version = _do_parse(config)
File "/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py", line 110, in _do_parse
raise LookupError(
LookupError: setuptools-scm was unable to detect version for '/build/qtile/src/qtile-0.16.0'.
Make sure you're either building from a fully intact git repository or PyPI tarballs. Most other sources (such as GitHub's tarballs, a git checkout without the .git folder) don't contain the necessary metadata and will not work.
For example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj
```
It seems that setuptools_scm has been introduced. Unfortunately, this breaks the build for me.
It would be great either to include the tests in the PyPI sdist tarballs or to start using [signed tags](https://github.com/qtile/qtile/tags) again, since then I could rely on signed tags and a git repository (note: the latter might not help other distributions, as they have different policies).
If you choose the latter (both would be great, too), please make sure @flacjacket signs the key of @tych0 so that a clear chain of trust can be established.
# Qtile version
0.16.0
# Stack traces
n/a
# Configuration
n/a
--- END ISSUE ---
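For context on the failure above: setuptools-scm derives the version from git metadata or from PyPI sdist metadata, neither of which a GitHub tarball contains. A minimal sketch of the usual packaging workaround, assuming the version can be read off the tarball name (setuptools-scm honors the `SETUPTOOLS_SCM_PRETEND_VERSION` environment variable):
```python
# Hypothetical packaging script: pin the version so setuptools-scm does not
# need git metadata to detect it.
import os
import subprocess

os.environ["SETUPTOOLS_SCM_PRETEND_VERSION"] = "0.16.0"  # assumed from the tarball name
subprocess.run(["python", "setup.py", "build"], check=True)
```
Note this only sidesteps the version lookup; it does not restore test files missing from PyPI sdists.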
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) 2008 Aldo Cortesi
4 # Copyright (c) 2011 Mounier Florian
5 # Copyright (c) 2012 dmpayton
6 # Copyright (c) 2014 Sean Vig
7 # Copyright (c) 2014 roger
8 # Copyright (c) 2014 Pedro Algarvio
9 # Copyright (c) 2014-2015 Tycho Andersen
10 #
11 # Permission is hereby granted, free of charge, to any person obtaining a copy
12 # of this software and associated documentation files (the "Software"), to deal
13 # in the Software without restriction, including without limitation the rights
14 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 # copies of the Software, and to permit persons to whom the Software is
16 # furnished to do so, subject to the following conditions:
17 #
18 # The above copyright notice and this permission notice shall be included in
19 # all copies or substantial portions of the Software.
20 #
21 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 # SOFTWARE.
28
29 import sys
30 import textwrap
31
32 from setuptools import setup
33 from setuptools.command.install import install
34
35
36 class CheckCairoXcb(install):
37 def cairo_xcb_check(self):
38 try:
39 from cairocffi import cairo
40 cairo.cairo_xcb_surface_create
41 return True
42 except AttributeError:
43 return False
44
45 def finalize_options(self):
46 if not self.cairo_xcb_check():
47
48 print(textwrap.dedent("""
49
50 It looks like your cairocffi was not built with xcffib support. To fix this:
51
52 - Ensure a recent xcffib is installed (pip install 'xcffib>=0.5.0')
53 - The pip cache is cleared (remove ~/.cache/pip, if it exists)
54 - Reinstall cairocffi, either:
55
56 pip install --no-deps --ignore-installed cairocffi
57
58 or
59
60 pip uninstall cairocffi && pip install cairocffi
61 """))
62
63 sys.exit(1)
64 install.finalize_options(self)
65
66
67 def get_cffi_modules():
68 cffi_modules = [
69 'libqtile/pango_ffi_build.py:pango_ffi',
70 'libqtile/backend/x11/xcursors_ffi_build.py:xcursors_ffi',
71 ]
72 try:
73 from cffi.error import PkgConfigError
74 from cffi.pkgconfig import call
75 except ImportError:
76 # technically all ffi defined above wont be built
77 print('CFFI package is missing')
78 else:
79 try:
80 call('libpulse', '--libs')
81 except PkgConfigError:
82 print('Failed to find pulseaudio headers. '
83 'PulseVolume widget will be unavailable')
84 else:
85 cffi_modules.append(
86 'libqtile/widget/pulseaudio_ffi.py:pulseaudio_ffi'
87 )
88 return cffi_modules
89
90
91 setup(
92 cmdclass={'install': CheckCairoXcb},
93 use_scm_version=True,
94 cffi_modules=get_cffi_modules(),
95 install_requires=["cffi>=1.0.0"],
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -93,4 +93,5 @@
use_scm_version=True,
cffi_modules=get_cffi_modules(),
install_requires=["cffi>=1.0.0"],
+ include_package_data=True,
)
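The one-line fix enables `include_package_data=True`, which makes setuptools include package data files reported by its file finders (with setuptools-scm, anything tracked by git) when building distributions. A quick sanity check one might run after rebuilding an sdist (archive path and layout are assumptions):
```python
# Minimal sketch: confirm the rebuilt sdist now ships the test files.
import tarfile

with tarfile.open("dist/qtile-0.16.0.tar.gz") as sdist:  # path assumed
    names = sdist.getnames()
    # Wrap each name in slashes so "test" must match a whole path component.
    assert any("/test/" in f"/{name}/" for name in names), "tests missing from sdist"
```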
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -93,4 +93,5 @@\n use_scm_version=True,\n cffi_modules=get_cffi_modules(),\n install_requires=[\"cffi>=1.0.0\"],\n+ include_package_data=True,\n )\n", "issue": "0.16.0: impossible to build from github sources (to run tests)\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\nHi! I package qtile for Arch Linux. I'm currently trying to build 0.16.0.\r\nUsually I also run the test suite against the release (although there are still problems: #1352 and #1130) to be able to at least ensure some kind of compatibility with the Arch Linux provided python3 ecosystem.\r\nHowever, running tests is only possible with the github source tarballs (because the test files are included), which unfortunately is not the case for the pypi tarballs.\r\n\r\nWhen running `python setup.py build` for 0.16.0 I am now getting this:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 91, in <module>\r\n setup(\r\n File \"/usr/lib/python3.8/site-packages/setuptools/__init__.py\", line 165, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/usr/lib/python3.8/distutils/core.py\", line 108, in setup\r\n _setup_distribution = dist = klass(attrs)\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 429, in __init__\r\n _Distribution.__init__(self, {\r\n File \"/usr/lib/python3.8/distutils/dist.py\", line 292, in __init__\r\n self.finalize_options()\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 721, in finalize_options\r\n ep(self)\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 728, in _finalize_setup_keywords\r\n ep.load()(self, ep.name, value)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/integration.py\", line 17, in version_keyword\r\n dist.metadata.version = _get_version(config)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py\", line 148, in _get_version\r\n parsed_version = _do_parse(config)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py\", line 110, in _do_parse\r\n raise LookupError(\r\nLookupError: setuptools-scm was unable to detect version for '/build/qtile/src/qtile-0.16.0'.\r\n\r\nMake sure you're either building from a fully intact git repository or PyPI tarballs. Most other sources (such as GitHub's tarballs, a git checkout without the .git folder) don't contain the necessary metadata and will not work.\r\n\r\nFor example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj\r\n```\r\n\r\nIt seems that setuptools_scm has been introduced. 
Unfortunately, this breaks the build for me.\r\n\r\nIt would be great to either include the tests in the pypi sdist tarballs or to start using [signed tags](https://github.com/qtile/qtile/tags) again, as then I can rely upon signed tags and a git repository (note: the latter might not help other distributions, as they have different policies).\r\nIf you choose the latter (both would be great too), please make sure to have @flacjacket sign the key of @tych0 so that a clear chain of trust can be established.\r\n\r\n# Qtile version\r\n\r\n0.16.0\r\n\r\n# Stack traces\r\n\r\nn/a\r\n\r\n# Configuration\r\n\r\nn/a\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) 2008 Aldo Cortesi\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Pedro Algarvio\n# Copyright (c) 2014-2015 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport textwrap\n\nfrom setuptools import setup\nfrom setuptools.command.install import install\n\n\nclass CheckCairoXcb(install):\n def cairo_xcb_check(self):\n try:\n from cairocffi import cairo\n cairo.cairo_xcb_surface_create\n return True\n except AttributeError:\n return False\n\n def finalize_options(self):\n if not self.cairo_xcb_check():\n\n print(textwrap.dedent(\"\"\"\n\n It looks like your cairocffi was not built with xcffib support. To fix this:\n\n - Ensure a recent xcffib is installed (pip install 'xcffib>=0.5.0')\n - The pip cache is cleared (remove ~/.cache/pip, if it exists)\n - Reinstall cairocffi, either:\n\n pip install --no-deps --ignore-installed cairocffi\n\n or\n\n pip uninstall cairocffi && pip install cairocffi\n \"\"\"))\n\n sys.exit(1)\n install.finalize_options(self)\n\n\ndef get_cffi_modules():\n cffi_modules = [\n 'libqtile/pango_ffi_build.py:pango_ffi',\n 'libqtile/backend/x11/xcursors_ffi_build.py:xcursors_ffi',\n ]\n try:\n from cffi.error import PkgConfigError\n from cffi.pkgconfig import call\n except ImportError:\n # technically all ffi defined above wont be built\n print('CFFI package is missing')\n else:\n try:\n call('libpulse', '--libs')\n except PkgConfigError:\n print('Failed to find pulseaudio headers. 
'\n 'PulseVolume widget will be unavailable')\n else:\n cffi_modules.append(\n 'libqtile/widget/pulseaudio_ffi.py:pulseaudio_ffi'\n )\n return cffi_modules\n\n\nsetup(\n cmdclass={'install': CheckCairoXcb},\n use_scm_version=True,\n cffi_modules=get_cffi_modules(),\n install_requires=[\"cffi>=1.0.0\"],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) 2008 Aldo Cortesi\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Pedro Algarvio\n# Copyright (c) 2014-2015 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport textwrap\n\nfrom setuptools import setup\nfrom setuptools.command.install import install\n\n\nclass CheckCairoXcb(install):\n def cairo_xcb_check(self):\n try:\n from cairocffi import cairo\n cairo.cairo_xcb_surface_create\n return True\n except AttributeError:\n return False\n\n def finalize_options(self):\n if not self.cairo_xcb_check():\n\n print(textwrap.dedent(\"\"\"\n\n It looks like your cairocffi was not built with xcffib support. To fix this:\n\n - Ensure a recent xcffib is installed (pip install 'xcffib>=0.5.0')\n - The pip cache is cleared (remove ~/.cache/pip, if it exists)\n - Reinstall cairocffi, either:\n\n pip install --no-deps --ignore-installed cairocffi\n\n or\n\n pip uninstall cairocffi && pip install cairocffi\n \"\"\"))\n\n sys.exit(1)\n install.finalize_options(self)\n\n\ndef get_cffi_modules():\n cffi_modules = [\n 'libqtile/pango_ffi_build.py:pango_ffi',\n 'libqtile/backend/x11/xcursors_ffi_build.py:xcursors_ffi',\n ]\n try:\n from cffi.error import PkgConfigError\n from cffi.pkgconfig import call\n except ImportError:\n # technically all ffi defined above wont be built\n print('CFFI package is missing')\n else:\n try:\n call('libpulse', '--libs')\n except PkgConfigError:\n print('Failed to find pulseaudio headers. '\n 'PulseVolume widget will be unavailable')\n else:\n cffi_modules.append(\n 'libqtile/widget/pulseaudio_ffi.py:pulseaudio_ffi'\n )\n return cffi_modules\n\n\nsetup(\n cmdclass={'install': CheckCairoXcb},\n use_scm_version=True,\n cffi_modules=get_cffi_modules(),\n install_requires=[\"cffi>=1.0.0\"],\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 2,023 | 69 |
gh_patches_debug_27358 | rasdani/github-patches | git_diff | modoboa__modoboa-759 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Password complexity
We must ensure that passwords meet a minimum complexity requirement.
See https://github.com/modoboa/modoboa-admin/issues/27
--- END ISSUE ---
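The fix eventually adopted (see the patch below) swaps the plain `CharField`s for the `PasswordField` from the django-passwords package, which enforces configurable rules. As a rough illustration of the kind of check involved (the thresholds here are assumptions, not Modoboa's policy):
```python
# Minimal sketch of a complexity rule: length plus character-class coverage.
import re

def is_complex_enough(password: str, min_length: int = 8) -> bool:
    checks = [
        len(password) >= min_length,
        re.search(r"[A-Z]", password) is not None,  # an uppercase letter
        re.search(r"[a-z]", password) is not None,  # a lowercase letter
        re.search(r"\d", password) is not None,     # a digit
    ]
    return all(checks)
```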
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/core/forms.py`
Content:
```
1 # coding: utf-8
2
3 """Core forms."""
4
5 from django import forms
6 from django.utils.translation import ugettext as _, ugettext_lazy
7
8 from modoboa.core.models import User
9 from modoboa.lib import parameters
10
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(
14 label=ugettext_lazy("Username"),
15 widget=forms.TextInput(attrs={"class": "form-control"})
16 )
17 password = forms.CharField(
18 label=ugettext_lazy("Password"),
19 widget=forms.PasswordInput(attrs={"class": "form-control"})
20 )
21 rememberme = forms.BooleanField(
22 initial=False,
23 required=False
24 )
25
26
27 class ProfileForm(forms.ModelForm):
28 oldpassword = forms.CharField(
29 label=ugettext_lazy("Old password"), required=False,
30 widget=forms.PasswordInput(attrs={"class": "form-control"})
31 )
32 newpassword = forms.CharField(
33 label=ugettext_lazy("New password"), required=False,
34 widget=forms.PasswordInput(attrs={"class": "form-control"})
35 )
36 confirmation = forms.CharField(
37 label=ugettext_lazy("Confirmation"), required=False,
38 widget=forms.PasswordInput(attrs={"class": "form-control"})
39 )
40
41 class Meta:
42 model = User
43 fields = ("first_name", "last_name")
44 widgets = {
45 'first_name': forms.TextInput(attrs={'class': 'form-control'}),
46 'last_name': forms.TextInput(attrs={'class': 'form-control'})
47 }
48
49 def __init__(self, update_password, *args, **kwargs):
50 super(ProfileForm, self).__init__(*args, **kwargs)
51 if not update_password:
52 del self.fields["oldpassword"]
53 del self.fields["newpassword"]
54 del self.fields["confirmation"]
55
56 def clean_oldpassword(self):
57 if self.cleaned_data["oldpassword"] == "":
58 return self.cleaned_data["oldpassword"]
59
60 if parameters.get_admin("AUTHENTICATION_TYPE") != "local":
61 return self.cleaned_data["oldpassword"]
62
63 if not self.instance.check_password(self.cleaned_data["oldpassword"]):
64 raise forms.ValidationError(_("Old password mismatchs"))
65 return self.cleaned_data["oldpassword"]
66
67 def clean_confirmation(self):
68 newpassword = self.cleaned_data["newpassword"]
69 confirmation = self.cleaned_data["confirmation"]
70 if newpassword != confirmation:
71 raise forms.ValidationError(_("Passwords mismatch"))
72 return self.cleaned_data["confirmation"]
73
74 def save(self, commit=True):
75 user = super(ProfileForm, self).save(commit=False)
76 if commit:
77 if self.cleaned_data.get("confirmation", "") != "":
78 user.set_password(
79 self.cleaned_data["confirmation"],
80 self.cleaned_data["oldpassword"]
81 )
82 user.save()
83 return user
84
```
Path: `modoboa/core/dev_settings.py`
Content:
```
1 # Development settings
2 import os
3
4 BOWER_COMPONENTS_ROOT = os.path.join(
5 os.path.dirname(__file__), ".."
6 )
7
8 BOWER_INSTALLED_APPS = (
9 "jquery#1.9",
10 "jquery-ui#1.11",
11 "bootstrap#3.3.1",
12 "bootstrap-select#1.6",
13 "d3#3.5.0",
14 "eonasdan-bootstrap-datetimepicker#3.1.3",
15 "font-awesome#4.2.0",
16 "c3#0.4.10",
17 )
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modoboa/core/dev_settings.py b/modoboa/core/dev_settings.py
--- a/modoboa/core/dev_settings.py
+++ b/modoboa/core/dev_settings.py
@@ -8,7 +8,7 @@
BOWER_INSTALLED_APPS = (
"jquery#1.9",
"jquery-ui#1.11",
- "bootstrap#3.3.1",
+ "bootstrap#3.3.5",
"bootstrap-select#1.6",
"d3#3.5.0",
"eonasdan-bootstrap-datetimepicker#3.1.3",
diff --git a/modoboa/core/forms.py b/modoboa/core/forms.py
--- a/modoboa/core/forms.py
+++ b/modoboa/core/forms.py
@@ -5,6 +5,8 @@
from django import forms
from django.utils.translation import ugettext as _, ugettext_lazy
+from passwords.fields import PasswordField
+
from modoboa.core.models import User
from modoboa.lib import parameters
@@ -29,11 +31,11 @@
label=ugettext_lazy("Old password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
- newpassword = forms.CharField(
+ newpassword = PasswordField(
label=ugettext_lazy("New password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
- confirmation = forms.CharField(
+ confirmation = PasswordField(
label=ugettext_lazy("Confirmation"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
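`PasswordField` validates against project settings rather than hard-coded rules. A sketch of the settings django-passwords consults; the values shown are placeholders, not what Modoboa ships:
```python
# Hypothetical Django settings for django-passwords; values are illustrative only.
PASSWORD_MIN_LENGTH = 8
PASSWORD_COMPLEXITY = {
    "UPPER": 1,   # minimum number of uppercase letters
    "LOWER": 1,   # minimum number of lowercase letters
    "DIGITS": 1,  # minimum number of digits
}
```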
| {"golden_diff": "diff --git a/modoboa/core/dev_settings.py b/modoboa/core/dev_settings.py\n--- a/modoboa/core/dev_settings.py\n+++ b/modoboa/core/dev_settings.py\n@@ -8,7 +8,7 @@\n BOWER_INSTALLED_APPS = (\n \"jquery#1.9\",\n \"jquery-ui#1.11\",\n- \"bootstrap#3.3.1\",\n+ \"bootstrap#3.3.5\",\n \"bootstrap-select#1.6\",\n \"d3#3.5.0\",\n \"eonasdan-bootstrap-datetimepicker#3.1.3\",\ndiff --git a/modoboa/core/forms.py b/modoboa/core/forms.py\n--- a/modoboa/core/forms.py\n+++ b/modoboa/core/forms.py\n@@ -5,6 +5,8 @@\n from django import forms\n from django.utils.translation import ugettext as _, ugettext_lazy\n \n+from passwords.fields import PasswordField\n+\n from modoboa.core.models import User\n from modoboa.lib import parameters\n \n@@ -29,11 +31,11 @@\n label=ugettext_lazy(\"Old password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n- newpassword = forms.CharField(\n+ newpassword = PasswordField(\n label=ugettext_lazy(\"New password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n- confirmation = forms.CharField(\n+ confirmation = PasswordField(\n label=ugettext_lazy(\"Confirmation\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n", "issue": "Passwords complexity\nWe must ensure passwords respect a minimum complexity.\n\nSee https://github.com/modoboa/modoboa-admin/issues/27\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"Core forms.\"\"\"\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\n\nfrom modoboa.core.models import User\nfrom modoboa.lib import parameters\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(\n label=ugettext_lazy(\"Username\"),\n widget=forms.TextInput(attrs={\"class\": \"form-control\"})\n )\n password = forms.CharField(\n label=ugettext_lazy(\"Password\"),\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n rememberme = forms.BooleanField(\n initial=False,\n required=False\n )\n\n\nclass ProfileForm(forms.ModelForm):\n oldpassword = forms.CharField(\n label=ugettext_lazy(\"Old password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n newpassword = forms.CharField(\n label=ugettext_lazy(\"New password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n confirmation = forms.CharField(\n label=ugettext_lazy(\"Confirmation\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n\n class Meta:\n model = User\n fields = (\"first_name\", \"last_name\")\n widgets = {\n 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'last_name': forms.TextInput(attrs={'class': 'form-control'})\n }\n\n def __init__(self, update_password, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n if not update_password:\n del self.fields[\"oldpassword\"]\n del self.fields[\"newpassword\"]\n del self.fields[\"confirmation\"]\n\n def clean_oldpassword(self):\n if self.cleaned_data[\"oldpassword\"] == \"\":\n return self.cleaned_data[\"oldpassword\"]\n\n if parameters.get_admin(\"AUTHENTICATION_TYPE\") != \"local\":\n return self.cleaned_data[\"oldpassword\"]\n\n if not self.instance.check_password(self.cleaned_data[\"oldpassword\"]):\n raise forms.ValidationError(_(\"Old password mismatchs\"))\n return self.cleaned_data[\"oldpassword\"]\n\n def clean_confirmation(self):\n newpassword = self.cleaned_data[\"newpassword\"]\n 
confirmation = self.cleaned_data[\"confirmation\"]\n if newpassword != confirmation:\n raise forms.ValidationError(_(\"Passwords mismatch\"))\n return self.cleaned_data[\"confirmation\"]\n\n def save(self, commit=True):\n user = super(ProfileForm, self).save(commit=False)\n if commit:\n if self.cleaned_data.get(\"confirmation\", \"\") != \"\":\n user.set_password(\n self.cleaned_data[\"confirmation\"],\n self.cleaned_data[\"oldpassword\"]\n )\n user.save()\n return user\n", "path": "modoboa/core/forms.py"}, {"content": "# Development settings\nimport os\n\nBOWER_COMPONENTS_ROOT = os.path.join(\n os.path.dirname(__file__), \"..\"\n)\n\nBOWER_INSTALLED_APPS = (\n \"jquery#1.9\",\n \"jquery-ui#1.11\",\n \"bootstrap#3.3.1\",\n \"bootstrap-select#1.6\",\n \"d3#3.5.0\",\n \"eonasdan-bootstrap-datetimepicker#3.1.3\",\n \"font-awesome#4.2.0\",\n \"c3#0.4.10\",\n)\n", "path": "modoboa/core/dev_settings.py"}], "after_files": [{"content": "# coding: utf-8\n\n\"\"\"Core forms.\"\"\"\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\n\nfrom passwords.fields import PasswordField\n\nfrom modoboa.core.models import User\nfrom modoboa.lib import parameters\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(\n label=ugettext_lazy(\"Username\"),\n widget=forms.TextInput(attrs={\"class\": \"form-control\"})\n )\n password = forms.CharField(\n label=ugettext_lazy(\"Password\"),\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n rememberme = forms.BooleanField(\n initial=False,\n required=False\n )\n\n\nclass ProfileForm(forms.ModelForm):\n oldpassword = forms.CharField(\n label=ugettext_lazy(\"Old password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n newpassword = PasswordField(\n label=ugettext_lazy(\"New password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n confirmation = PasswordField(\n label=ugettext_lazy(\"Confirmation\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n\n class Meta:\n model = User\n fields = (\"first_name\", \"last_name\")\n widgets = {\n 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'last_name': forms.TextInput(attrs={'class': 'form-control'})\n }\n\n def __init__(self, update_password, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n if not update_password:\n del self.fields[\"oldpassword\"]\n del self.fields[\"newpassword\"]\n del self.fields[\"confirmation\"]\n\n def clean_oldpassword(self):\n if self.cleaned_data[\"oldpassword\"] == \"\":\n return self.cleaned_data[\"oldpassword\"]\n\n if parameters.get_admin(\"AUTHENTICATION_TYPE\") != \"local\":\n return self.cleaned_data[\"oldpassword\"]\n\n if not self.instance.check_password(self.cleaned_data[\"oldpassword\"]):\n raise forms.ValidationError(_(\"Old password mismatchs\"))\n return self.cleaned_data[\"oldpassword\"]\n\n def clean_confirmation(self):\n newpassword = self.cleaned_data[\"newpassword\"]\n confirmation = self.cleaned_data[\"confirmation\"]\n if newpassword != confirmation:\n raise forms.ValidationError(_(\"Passwords mismatch\"))\n return self.cleaned_data[\"confirmation\"]\n\n def save(self, commit=True):\n user = super(ProfileForm, self).save(commit=False)\n if commit:\n if self.cleaned_data.get(\"confirmation\", \"\") != \"\":\n user.set_password(\n self.cleaned_data[\"confirmation\"],\n self.cleaned_data[\"oldpassword\"]\n )\n user.save()\n return user\n", 
"path": "modoboa/core/forms.py"}, {"content": "# Development settings\nimport os\n\nBOWER_COMPONENTS_ROOT = os.path.join(\n os.path.dirname(__file__), \"..\"\n)\n\nBOWER_INSTALLED_APPS = (\n \"jquery#1.9\",\n \"jquery-ui#1.11\",\n \"bootstrap#3.3.5\",\n \"bootstrap-select#1.6\",\n \"d3#3.5.0\",\n \"eonasdan-bootstrap-datetimepicker#3.1.3\",\n \"font-awesome#4.2.0\",\n \"c3#0.4.10\",\n)\n", "path": "modoboa/core/dev_settings.py"}]} | 1,177 | 347 |
gh_patches_debug_29775 | rasdani/github-patches | git_diff | liqd__adhocracy4-476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
adding multiple answer text to answer page
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/comments_async/serializers.py`
Content:
```
1 from django.conf import settings
2 from django.utils.translation import ugettext as _
3 from easy_thumbnails.files import get_thumbnailer
4 from rest_framework import serializers
5
6 from adhocracy4.comments.models import Comment
7
8
9 class CommentSerializer(serializers.ModelSerializer):
10 """Default Serializer for the comments."""
11
12 user_name = serializers.SerializerMethodField()
13 user_pk = serializers.SerializerMethodField()
14 user_profile_url = serializers.SerializerMethodField()
15 user_image = serializers.SerializerMethodField()
16 is_deleted = serializers.SerializerMethodField()
17 ratings = serializers.SerializerMethodField()
18 is_moderator = serializers.SerializerMethodField()
19
20 class Meta:
21 model = Comment
22 read_only_fields = ('modified', 'created', 'id',
23 'user_name', 'user_pk', 'user_image',
24 'ratings', 'content_type', 'object_pk')
25 exclude = ('creator', 'is_censored', 'is_removed')
26
27 def to_representation(self, instance):
28 """
29 Create a dictionary form categories.
30
31 Gets the categories and adds them along with their values
32 to a dictionary.
33 """
34 ret = super().to_representation(instance)
35 categories = {}
36 if ret['comment_categories']:
37 category_choices = getattr(settings,
38 'A4_COMMENT_CATEGORIES', '')
39 if category_choices:
40 category_choices = dict((x, str(y)) for x, y
41 in category_choices)
42 category_list = ret['comment_categories'].strip('[]').split(',')
43 for category in category_list:
44 if category in category_choices:
45 categories[category] = category_choices[category]
46 else:
47 categories[category] = category
48 ret['comment_categories'] = categories
49 return ret
50
51 def to_internal_value(self, data):
52 data = super().to_internal_value(data)
53 if 'comment_categories' in data:
54 value = data.get('comment_categories')
55 if value == '' or value == '[]':
56 raise serializers.ValidationError({
57 'comment_categories': _('Please choose a category')
58 })
59 return data
60
61 def get_user_pk(self, obj):
62 if (obj.is_censored or obj.is_removed):
63 return -1
64 return str(obj.creator.id)
65
66 def get_user_profile_url(self, obj):
67 if obj.is_censored or obj.is_removed:
68 return ''
69 try:
70 return obj.creator.get_absolute_url()
71 except AttributeError:
72 return ''
73
74 def get_user_name(self, obj):
75 """Don't show username if comment is marked removed or censored."""
76 if(obj.is_censored or obj.is_removed):
77 return _('unknown user')
78 return obj.creator.get_short_name()
79
80 def get_user_image(self, obj):
81 """Load small thumbnail images for user images."""
82 if(obj.is_censored or obj.is_removed):
83 return None
84 try:
85 if obj.creator.avatar:
86 avatar = get_thumbnailer(obj.creator.avatar)['avatar']
87 return avatar.url
88 except AttributeError:
89 pass
90 return None
91
92 def get_is_moderator(self, obj):
93 return obj.project.has_moderator(obj.creator)
94
95 def get_is_deleted(self, obj):
96 """Return true if one of the flags is set."""
97 return (obj.is_censored or obj.is_removed)
98
99 def get_ratings(self, comment):
100 """
101 Get positive and negative rating count.
102
103 As well as info on the request users rating
104 """
105 user = self.context['request'].user
106 positive_ratings = comment.ratings.filter(value=1).count()
107 negative_ratings = comment.ratings.filter(value=-1).count()
108
109 if user.is_authenticated:
110 user_rating = comment.ratings.filter(creator=user).first()
111 else:
112 user_rating = None
113
114 if user_rating:
115 user_rating_value = user_rating.value
116 user_rating_id = user_rating.pk
117 else:
118 user_rating_value = None
119 user_rating_id = None
120
121 result = {
122 'positive_ratings': positive_ratings,
123 'negative_ratings': negative_ratings,
124 'current_user_rating_value': user_rating_value,
125 'current_user_rating_id': user_rating_id
126 }
127
128 return result
129
130
131 class CommentListSerializer(CommentSerializer):
132 """Serializer for the comments to be used when viewed as list."""
133
134 comment = serializers.SerializerMethodField()
135
136 def get_comment(self, obj):
137 if obj.is_removed:
138 return _('deleted by creator')
139 if obj.is_censored:
140 return _('deleted by moderator')
141 return obj.comment
142
143
144 class ThreadSerializer(CommentSerializer):
145 """Serializes a comment including child comment (replies)."""
146
147 child_comments = CommentSerializer(many=True, read_only=True)
148
149
150 class ThreadListSerializer(CommentListSerializer):
151 """
152 Serializes comments when viewed.
153
154 As list including child comment (replies).
155 """
156
157 child_comments = CommentListSerializer(many=True, read_only=True)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py
--- a/adhocracy4/comments_async/serializers.py
+++ b/adhocracy4/comments_async/serializers.py
@@ -21,7 +21,8 @@
model = Comment
read_only_fields = ('modified', 'created', 'id',
'user_name', 'user_pk', 'user_image',
- 'ratings', 'content_type', 'object_pk')
+ 'user_image_fallback', 'ratings',
+ 'content_type', 'object_pk')
exclude = ('creator', 'is_censored', 'is_removed')
def to_representation(self, instance):
@@ -77,6 +78,17 @@
return _('unknown user')
return obj.creator.get_short_name()
+ def get_user_image_fallback(self, obj):
+ """Load small thumbnail images for default user images."""
+ if(obj.is_censored or obj.is_removed):
+ return None
+ try:
+ if obj.creator.avatar_fallback:
+ return obj.creator.avatar_fallback
+ except AttributeError:
+ pass
+ return None
+
def get_user_image(self, obj):
"""Load small thumbnail images for user images."""
if(obj.is_censored or obj.is_removed):
@@ -87,7 +99,7 @@
return avatar.url
except AttributeError:
pass
- return None
+ return self.get_user_image_fallback(obj)
def get_is_moderator(self, obj):
return obj.project.has_moderator(obj.creator)
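One detail worth noting about the patch: it adds `user_image_fallback` to `read_only_fields` and calls `get_user_image_fallback()` as a fallback inside `get_user_image()`, but listing a name in `read_only_fields` does not by itself create a serializer field. If the fallback URL should also appear as its own key in the payload, the serializer would additionally need the field declared; a sketch, assuming that is the intent:
```python
# Hypothetical addition inside CommentSerializer: expose the fallback avatar
# as its own payload key (get_user_image_fallback already exists after the patch).
user_image_fallback = serializers.SerializerMethodField()
```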
| {"golden_diff": "diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py\n--- a/adhocracy4/comments_async/serializers.py\n+++ b/adhocracy4/comments_async/serializers.py\n@@ -21,7 +21,8 @@\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n- 'ratings', 'content_type', 'object_pk')\n+ 'user_image_fallback', 'ratings',\n+ 'content_type', 'object_pk')\n exclude = ('creator', 'is_censored', 'is_removed')\n \n def to_representation(self, instance):\n@@ -77,6 +78,17 @@\n return _('unknown user')\n return obj.creator.get_short_name()\n \n+ def get_user_image_fallback(self, obj):\n+ \"\"\"Load small thumbnail images for default user images.\"\"\"\n+ if(obj.is_censored or obj.is_removed):\n+ return None\n+ try:\n+ if obj.creator.avatar_fallback:\n+ return obj.creator.avatar_fallback\n+ except AttributeError:\n+ pass\n+ return None\n+\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n@@ -87,7 +99,7 @@\n return avatar.url\n except AttributeError:\n pass\n- return None\n+ return self.get_user_image_fallback(obj)\n \n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n", "issue": "adding multiple answer text to answer page\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.comments.models import Comment\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Default Serializer for the comments.\"\"\"\n\n user_name = serializers.SerializerMethodField()\n user_pk = serializers.SerializerMethodField()\n user_profile_url = serializers.SerializerMethodField()\n user_image = serializers.SerializerMethodField()\n is_deleted = serializers.SerializerMethodField()\n ratings = serializers.SerializerMethodField()\n is_moderator = serializers.SerializerMethodField()\n\n class Meta:\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n 'ratings', 'content_type', 'object_pk')\n exclude = ('creator', 'is_censored', 'is_removed')\n\n def to_representation(self, instance):\n \"\"\"\n Create a dictionary form categories.\n\n Gets the categories and adds them along with their values\n to a dictionary.\n \"\"\"\n ret = super().to_representation(instance)\n categories = {}\n if ret['comment_categories']:\n category_choices = getattr(settings,\n 'A4_COMMENT_CATEGORIES', '')\n if category_choices:\n category_choices = dict((x, str(y)) for x, y\n in category_choices)\n category_list = ret['comment_categories'].strip('[]').split(',')\n for category in category_list:\n if category in category_choices:\n categories[category] = category_choices[category]\n else:\n categories[category] = category\n ret['comment_categories'] = categories\n return ret\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n if 'comment_categories' in data:\n value = data.get('comment_categories')\n if value == '' or value == '[]':\n raise serializers.ValidationError({\n 'comment_categories': _('Please choose a category')\n })\n return data\n\n def get_user_pk(self, obj):\n if (obj.is_censored or obj.is_removed):\n return -1\n return str(obj.creator.id)\n\n def get_user_profile_url(self, obj):\n if obj.is_censored or obj.is_removed:\n return ''\n try:\n return 
obj.creator.get_absolute_url()\n except AttributeError:\n return ''\n\n def get_user_name(self, obj):\n \"\"\"Don't show username if comment is marked removed or censored.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return _('unknown user')\n return obj.creator.get_short_name()\n\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar:\n avatar = get_thumbnailer(obj.creator.avatar)['avatar']\n return avatar.url\n except AttributeError:\n pass\n return None\n\n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n\n def get_is_deleted(self, obj):\n \"\"\"Return true if one of the flags is set.\"\"\"\n return (obj.is_censored or obj.is_removed)\n\n def get_ratings(self, comment):\n \"\"\"\n Get positive and negative rating count.\n\n As well as info on the request users rating\n \"\"\"\n user = self.context['request'].user\n positive_ratings = comment.ratings.filter(value=1).count()\n negative_ratings = comment.ratings.filter(value=-1).count()\n\n if user.is_authenticated:\n user_rating = comment.ratings.filter(creator=user).first()\n else:\n user_rating = None\n\n if user_rating:\n user_rating_value = user_rating.value\n user_rating_id = user_rating.pk\n else:\n user_rating_value = None\n user_rating_id = None\n\n result = {\n 'positive_ratings': positive_ratings,\n 'negative_ratings': negative_ratings,\n 'current_user_rating_value': user_rating_value,\n 'current_user_rating_id': user_rating_id\n }\n\n return result\n\n\nclass CommentListSerializer(CommentSerializer):\n \"\"\"Serializer for the comments to be used when viewed as list.\"\"\"\n\n comment = serializers.SerializerMethodField()\n\n def get_comment(self, obj):\n if obj.is_removed:\n return _('deleted by creator')\n if obj.is_censored:\n return _('deleted by moderator')\n return obj.comment\n\n\nclass ThreadSerializer(CommentSerializer):\n \"\"\"Serializes a comment including child comment (replies).\"\"\"\n\n child_comments = CommentSerializer(many=True, read_only=True)\n\n\nclass ThreadListSerializer(CommentListSerializer):\n \"\"\"\n Serializes comments when viewed.\n\n As list including child comment (replies).\n \"\"\"\n\n child_comments = CommentListSerializer(many=True, read_only=True)\n", "path": "adhocracy4/comments_async/serializers.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.comments.models import Comment\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Default Serializer for the comments.\"\"\"\n\n user_name = serializers.SerializerMethodField()\n user_pk = serializers.SerializerMethodField()\n user_profile_url = serializers.SerializerMethodField()\n user_image = serializers.SerializerMethodField()\n is_deleted = serializers.SerializerMethodField()\n ratings = serializers.SerializerMethodField()\n is_moderator = serializers.SerializerMethodField()\n\n class Meta:\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n 'user_image_fallback', 'ratings',\n 'content_type', 'object_pk')\n exclude = ('creator', 'is_censored', 'is_removed')\n\n def to_representation(self, instance):\n \"\"\"\n Create a dictionary form categories.\n\n Gets the categories and adds them along with their values\n to a dictionary.\n 
\"\"\"\n ret = super().to_representation(instance)\n categories = {}\n if ret['comment_categories']:\n category_choices = getattr(settings,\n 'A4_COMMENT_CATEGORIES', '')\n if category_choices:\n category_choices = dict((x, str(y)) for x, y\n in category_choices)\n category_list = ret['comment_categories'].strip('[]').split(',')\n for category in category_list:\n if category in category_choices:\n categories[category] = category_choices[category]\n else:\n categories[category] = category\n ret['comment_categories'] = categories\n return ret\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n if 'comment_categories' in data:\n value = data.get('comment_categories')\n if value == '' or value == '[]':\n raise serializers.ValidationError({\n 'comment_categories': _('Please choose a category')\n })\n return data\n\n def get_user_pk(self, obj):\n if (obj.is_censored or obj.is_removed):\n return -1\n return str(obj.creator.id)\n\n def get_user_profile_url(self, obj):\n if obj.is_censored or obj.is_removed:\n return ''\n try:\n return obj.creator.get_absolute_url()\n except AttributeError:\n return ''\n\n def get_user_name(self, obj):\n \"\"\"Don't show username if comment is marked removed or censored.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return _('unknown user')\n return obj.creator.get_short_name()\n\n def get_user_image_fallback(self, obj):\n \"\"\"Load small thumbnail images for default user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar_fallback:\n return obj.creator.avatar_fallback\n except AttributeError:\n pass\n return None\n\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar:\n avatar = get_thumbnailer(obj.creator.avatar)['avatar']\n return avatar.url\n except AttributeError:\n pass\n return self.get_user_image_fallback(obj)\n\n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n\n def get_is_deleted(self, obj):\n \"\"\"Return true if one of the flags is set.\"\"\"\n return (obj.is_censored or obj.is_removed)\n\n def get_ratings(self, comment):\n \"\"\"\n Get positive and negative rating count.\n\n As well as info on the request users rating\n \"\"\"\n user = self.context['request'].user\n positive_ratings = comment.ratings.filter(value=1).count()\n negative_ratings = comment.ratings.filter(value=-1).count()\n\n if user.is_authenticated:\n user_rating = comment.ratings.filter(creator=user).first()\n else:\n user_rating = None\n\n if user_rating:\n user_rating_value = user_rating.value\n user_rating_id = user_rating.pk\n else:\n user_rating_value = None\n user_rating_id = None\n\n result = {\n 'positive_ratings': positive_ratings,\n 'negative_ratings': negative_ratings,\n 'current_user_rating_value': user_rating_value,\n 'current_user_rating_id': user_rating_id\n }\n\n return result\n\n\nclass CommentListSerializer(CommentSerializer):\n \"\"\"Serializer for the comments to be used when viewed as list.\"\"\"\n\n comment = serializers.SerializerMethodField()\n\n def get_comment(self, obj):\n if obj.is_removed:\n return _('deleted by creator')\n if obj.is_censored:\n return _('deleted by moderator')\n return obj.comment\n\n\nclass ThreadSerializer(CommentSerializer):\n \"\"\"Serializes a comment including child comment (replies).\"\"\"\n\n child_comments = CommentSerializer(many=True, read_only=True)\n\n\nclass 
ThreadListSerializer(CommentListSerializer):\n \"\"\"\n Serializes comments when viewed.\n\n As list including child comment (replies).\n \"\"\"\n\n child_comments = CommentListSerializer(many=True, read_only=True)\n", "path": "adhocracy4/comments_async/serializers.py"}]} | 1,669 | 353 |
gh_patches_debug_18594 | rasdani/github-patches | git_diff | modoboa__modoboa-973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test failure when using tox
This is because tox uses SQLite and has no database user set:
```
======================================================================
ERROR: test_map_upgrade (modoboa.admin.tests.test_mapfiles.MapFilesTestCase)
Check that map content is used.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gawel/bear/modoboa/.tox/py27/lib/python2.7/site-packages/modoboa/admin/tests/test_mapfiles.py", line 45, in test_map_upgrade
self.assertEqual(mapcontent["user"], dbsettings["USER"])
KeyError: 'user'
----------------------------------------------------------------------
Ran 178 tests in 44.553s
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/core/management/commands/generate_postfix_maps.py`
Content:
```
1 """Management command to generate/update postfix map files."""
2
3 import copy
4 import hashlib
5 import os
6 import sys
7
8 from django.conf import settings
9 from django.core.management.base import BaseCommand
10 from django.template import Context, Template
11 from django.utils import timezone
12
13 import dj_database_url
14
15 from ... import signals
16 from ... import utils
17
18 MAP_FILE_TEMPLATE = """# This file was generated on {{ date }} by running:
19 # {{ commandline }}
20 # DO NOT EDIT!
21 """
22
23
24 class Command(BaseCommand):
25 """Command class."""
26
27 help = "Generate/update postfix map files."
28
29 def add_arguments(self, parser):
30 """Add extra arguments."""
31 parser.add_argument(
32 "--dburl", help="Custom database url")
33 parser.add_argument(
34 "--destdir", default=".",
35 help="Directory where files will be created")
36 parser.add_argument(
37 "--force-overwrite", action="store_true", default=False,
38 help="Force overwrite of existing map files")
39
40 def __load_checksums(self, destdir):
41 """Load existing checksums if possible."""
42 self.__checksums_file = os.path.join(
43 destdir, "modoboa-postfix-maps.chk")
44 self.__checksums = {}
45 if not os.path.exists(self.__checksums_file):
46 return
47 with open(self.__checksums_file) as fp:
48 for line in fp:
49 fname, dbtype, checksum = line.split(":")
50 self.__checksums[fname.strip()] = {
51 "dbtype": dbtype, "checksum": checksum.strip()
52 }
53
54 def __register_map_files(self):
55 """Load specified applications."""
56 responses = signals.register_postfix_maps.send(sender=self.__class__)
57 mapfiles = []
58 for response in responses:
59 mapfiles += response[1]
60 return mapfiles
61
62 def __check_file(self, path):
63 """Check if map file has been modified."""
64 fname = os.path.basename(path)
65 condition = (
66 not self.__checksums or
67 fname not in self.__checksums)
68 if condition:
69 return True
70 with open(path) as fp:
71 checksum = hashlib.md5(fp.read()).hexdigest()
72 return checksum == self.__checksums[fname]["checksum"]
73
74 def get_template(self, dbtype):
75 """Return map file template."""
76 tplcontent = MAP_FILE_TEMPLATE
77 if dbtype == "sqlite":
78 tplcontent += """dbpath = {{ dbname }}
79 query = {{ query|safe }}
80 """
81 else:
82 tplcontent += """user = {{ dbuser }}
83 password = {{ dbpass }}
84 dbname = {{ dbname }}
85 hosts = {{ dbhost }}
86 query = {{ query|safe }}
87 """
88 return Template(tplcontent)
89
90 def get_template_context(self, options):
91 """Build the context used to render templates."""
92 dburl = options.get("dburl")
93 db_settings = (
94 dj_database_url.config(default=dburl)
95 if dburl else settings.DATABASES["default"])
96 if "sqlite" in db_settings["ENGINE"]:
97 dbtype = "sqlite"
98 elif "psycopg2" in db_settings["ENGINE"]:
99 dbtype = "postgres"
100 else:
101 dbtype = "mysql"
102 commandline = "{} {}".format(
103 os.path.basename(sys.argv[0]), " ".join(sys.argv[1:]))
104 context = {
105 "date": timezone.now(),
106 "commandline": commandline,
107 "dbtype": dbtype,
108 "dbuser": db_settings["USER"],
109 "dbpass": db_settings["PASSWORD"],
110 "dbname": db_settings["NAME"],
111 "dbhost": db_settings.get("HOST", "127.0.0.1"),
112 }
113 return context
114
115 def __render_map_file(
116 self, mapobject, destdir, context, force_overwrite=False):
117 """Render a map file."""
118 fullpath = os.path.join(destdir, mapobject.filename)
119 if os.path.exists(fullpath) and not force_overwrite:
120 if not self.__check_file(fullpath):
121 print(
122 "Cannot upgrade '{}' map because it has been modified."
123 .format(mapobject.filename))
124 return self.__checksums[mapobject.filename]
125 mapcontent = utils.parse_map_file(fullpath)
126 context = copy.deepcopy(context)
127 context["dbtype"] = self.__checksums[mapobject.filename]["dbtype"]
128 context["dbuser"] = mapcontent["user"]
129 context["dbpass"] = mapcontent["password"]
130 context["dbname"] = mapcontent["dbname"]
131 context["dbhost"] = mapcontent["hosts"]
132 content = self.get_template(context["dbtype"]).render(
133 Context(
134 dict(context.items(),
135 query=getattr(mapobject, context["dbtype"]))
136 )
137 )
138 fullpath = os.path.join(destdir, mapobject.filename)
139 with open(fullpath, "w") as fp:
140 fp.write(content)
141 return hashlib.md5(content).hexdigest()
142
143 def handle(self, *args, **options):
144 """Command entry point."""
145 mapfiles = self.__register_map_files()
146 destdir = os.path.realpath(options["destdir"])
147 try:
148 os.mkdir(destdir)
149 except OSError:
150 pass
151 self.__load_checksums(destdir)
152 context = self.get_template_context(options)
153 checksums = {}
154 for mapobject in mapfiles:
155 checksum = self.__render_map_file(
156 mapobject, destdir, context,
157 force_overwrite=options["force_overwrite"])
158 checksums[mapobject.filename] = checksum
159 with open(self.__checksums_file, "w") as fp:
160 for fname, checksum in checksums.items():
161 fp.write("{}:{}:{}\n".format(
162 fname, context["dbtype"], checksum))
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modoboa/core/management/commands/generate_postfix_maps.py b/modoboa/core/management/commands/generate_postfix_maps.py
--- a/modoboa/core/management/commands/generate_postfix_maps.py
+++ b/modoboa/core/management/commands/generate_postfix_maps.py
@@ -125,10 +125,13 @@
mapcontent = utils.parse_map_file(fullpath)
context = copy.deepcopy(context)
context["dbtype"] = self.__checksums[mapobject.filename]["dbtype"]
- context["dbuser"] = mapcontent["user"]
- context["dbpass"] = mapcontent["password"]
- context["dbname"] = mapcontent["dbname"]
- context["dbhost"] = mapcontent["hosts"]
+ if context["dbtype"] == "sqlite":
+ context["dbname"] = mapcontent["dbpath"]
+ else:
+ context["dbuser"] = mapcontent["user"]
+ context["dbpass"] = mapcontent["password"]
+ context["dbname"] = mapcontent["dbname"]
+ context["dbhost"] = mapcontent["hosts"]
content = self.get_template(context["dbtype"]).render(
Context(
dict(context.items(),
| {"golden_diff": "diff --git a/modoboa/core/management/commands/generate_postfix_maps.py b/modoboa/core/management/commands/generate_postfix_maps.py\n--- a/modoboa/core/management/commands/generate_postfix_maps.py\n+++ b/modoboa/core/management/commands/generate_postfix_maps.py\n@@ -125,10 +125,13 @@\n mapcontent = utils.parse_map_file(fullpath)\n context = copy.deepcopy(context)\n context[\"dbtype\"] = self.__checksums[mapobject.filename][\"dbtype\"]\n- context[\"dbuser\"] = mapcontent[\"user\"]\n- context[\"dbpass\"] = mapcontent[\"password\"]\n- context[\"dbname\"] = mapcontent[\"dbname\"]\n- context[\"dbhost\"] = mapcontent[\"hosts\"]\n+ if context[\"dbtype\"] == \"sqlite\":\n+ context[\"dbname\"] = mapcontent[\"dbpath\"]\n+ else:\n+ context[\"dbuser\"] = mapcontent[\"user\"]\n+ context[\"dbpass\"] = mapcontent[\"password\"]\n+ context[\"dbname\"] = mapcontent[\"dbname\"]\n+ context[\"dbhost\"] = mapcontent[\"hosts\"]\n content = self.get_template(context[\"dbtype\"]).render(\n Context(\n dict(context.items(),\n", "issue": "Test failure when using tox\nThis is because tox use sqlite and has no user set\n\n```\n======================================================================\nERROR: test_map_upgrade (modoboa.admin.tests.test_mapfiles.MapFilesTestCase)\nCheck that map content is used.\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/gawel/bear/modoboa/.tox/py27/lib/python2.7/site-packages/modoboa/admin/tests/test_mapfiles.py\", line 45, in test_map_upgrade\n self.assertEqual(mapcontent[\"user\"], dbsettings[\"USER\"])\nKeyError: 'user'\n\n----------------------------------------------------------------------\nRan 178 tests in 44.553s\n```\n\n", "before_files": [{"content": "\"\"\"Management command to generate/update postfix map files.\"\"\"\n\nimport copy\nimport hashlib\nimport os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template import Context, Template\nfrom django.utils import timezone\n\nimport dj_database_url\n\nfrom ... import signals\nfrom ... 
import utils\n\nMAP_FILE_TEMPLATE = \"\"\"# This file was generated on {{ date }} by running:\n# {{ commandline }}\n# DO NOT EDIT!\n\"\"\"\n\n\nclass Command(BaseCommand):\n \"\"\"Command class.\"\"\"\n\n help = \"Generate/update postfix map files.\"\n\n def add_arguments(self, parser):\n \"\"\"Add extra arguments.\"\"\"\n parser.add_argument(\n \"--dburl\", help=\"Custom database url\")\n parser.add_argument(\n \"--destdir\", default=\".\",\n help=\"Directory where files will be created\")\n parser.add_argument(\n \"--force-overwrite\", action=\"store_true\", default=False,\n help=\"Force overwrite of existing map files\")\n\n def __load_checksums(self, destdir):\n \"\"\"Load existing checksums if possible.\"\"\"\n self.__checksums_file = os.path.join(\n destdir, \"modoboa-postfix-maps.chk\")\n self.__checksums = {}\n if not os.path.exists(self.__checksums_file):\n return\n with open(self.__checksums_file) as fp:\n for line in fp:\n fname, dbtype, checksum = line.split(\":\")\n self.__checksums[fname.strip()] = {\n \"dbtype\": dbtype, \"checksum\": checksum.strip()\n }\n\n def __register_map_files(self):\n \"\"\"Load specified applications.\"\"\"\n responses = signals.register_postfix_maps.send(sender=self.__class__)\n mapfiles = []\n for response in responses:\n mapfiles += response[1]\n return mapfiles\n\n def __check_file(self, path):\n \"\"\"Check if map file has been modified.\"\"\"\n fname = os.path.basename(path)\n condition = (\n not self.__checksums or\n fname not in self.__checksums)\n if condition:\n return True\n with open(path) as fp:\n checksum = hashlib.md5(fp.read()).hexdigest()\n return checksum == self.__checksums[fname][\"checksum\"]\n\n def get_template(self, dbtype):\n \"\"\"Return map file template.\"\"\"\n tplcontent = MAP_FILE_TEMPLATE\n if dbtype == \"sqlite\":\n tplcontent += \"\"\"dbpath = {{ dbname }}\nquery = {{ query|safe }}\n\"\"\"\n else:\n tplcontent += \"\"\"user = {{ dbuser }}\npassword = {{ dbpass }}\ndbname = {{ dbname }}\nhosts = {{ dbhost }}\nquery = {{ query|safe }}\n\"\"\"\n return Template(tplcontent)\n\n def get_template_context(self, options):\n \"\"\"Build the context used to render templates.\"\"\"\n dburl = options.get(\"dburl\")\n db_settings = (\n dj_database_url.config(default=dburl)\n if dburl else settings.DATABASES[\"default\"])\n if \"sqlite\" in db_settings[\"ENGINE\"]:\n dbtype = \"sqlite\"\n elif \"psycopg2\" in db_settings[\"ENGINE\"]:\n dbtype = \"postgres\"\n else:\n dbtype = \"mysql\"\n commandline = \"{} {}\".format(\n os.path.basename(sys.argv[0]), \" \".join(sys.argv[1:]))\n context = {\n \"date\": timezone.now(),\n \"commandline\": commandline,\n \"dbtype\": dbtype,\n \"dbuser\": db_settings[\"USER\"],\n \"dbpass\": db_settings[\"PASSWORD\"],\n \"dbname\": db_settings[\"NAME\"],\n \"dbhost\": db_settings.get(\"HOST\", \"127.0.0.1\"),\n }\n return context\n\n def __render_map_file(\n self, mapobject, destdir, context, force_overwrite=False):\n \"\"\"Render a map file.\"\"\"\n fullpath = os.path.join(destdir, mapobject.filename)\n if os.path.exists(fullpath) and not force_overwrite:\n if not self.__check_file(fullpath):\n print(\n \"Cannot upgrade '{}' map because it has been modified.\"\n .format(mapobject.filename))\n return self.__checksums[mapobject.filename]\n mapcontent = utils.parse_map_file(fullpath)\n context = copy.deepcopy(context)\n context[\"dbtype\"] = self.__checksums[mapobject.filename][\"dbtype\"]\n context[\"dbuser\"] = mapcontent[\"user\"]\n context[\"dbpass\"] = mapcontent[\"password\"]\n 
context[\"dbname\"] = mapcontent[\"dbname\"]\n context[\"dbhost\"] = mapcontent[\"hosts\"]\n content = self.get_template(context[\"dbtype\"]).render(\n Context(\n dict(context.items(),\n query=getattr(mapobject, context[\"dbtype\"]))\n )\n )\n fullpath = os.path.join(destdir, mapobject.filename)\n with open(fullpath, \"w\") as fp:\n fp.write(content)\n return hashlib.md5(content).hexdigest()\n\n def handle(self, *args, **options):\n \"\"\"Command entry point.\"\"\"\n mapfiles = self.__register_map_files()\n destdir = os.path.realpath(options[\"destdir\"])\n try:\n os.mkdir(destdir)\n except OSError:\n pass\n self.__load_checksums(destdir)\n context = self.get_template_context(options)\n checksums = {}\n for mapobject in mapfiles:\n checksum = self.__render_map_file(\n mapobject, destdir, context,\n force_overwrite=options[\"force_overwrite\"])\n checksums[mapobject.filename] = checksum\n with open(self.__checksums_file, \"w\") as fp:\n for fname, checksum in checksums.items():\n fp.write(\"{}:{}:{}\\n\".format(\n fname, context[\"dbtype\"], checksum))\n", "path": "modoboa/core/management/commands/generate_postfix_maps.py"}], "after_files": [{"content": "\"\"\"Management command to generate/update postfix map files.\"\"\"\n\nimport copy\nimport hashlib\nimport os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template import Context, Template\nfrom django.utils import timezone\n\nimport dj_database_url\n\nfrom ... import signals\nfrom ... import utils\n\nMAP_FILE_TEMPLATE = \"\"\"# This file was generated on {{ date }} by running:\n# {{ commandline }}\n# DO NOT EDIT!\n\"\"\"\n\n\nclass Command(BaseCommand):\n \"\"\"Command class.\"\"\"\n\n help = \"Generate/update postfix map files.\"\n\n def add_arguments(self, parser):\n \"\"\"Add extra arguments.\"\"\"\n parser.add_argument(\n \"--dburl\", help=\"Custom database url\")\n parser.add_argument(\n \"--destdir\", default=\".\",\n help=\"Directory where files will be created\")\n parser.add_argument(\n \"--force-overwrite\", action=\"store_true\", default=False,\n help=\"Force overwrite of existing map files\")\n\n def __load_checksums(self, destdir):\n \"\"\"Load existing checksums if possible.\"\"\"\n self.__checksums_file = os.path.join(\n destdir, \"modoboa-postfix-maps.chk\")\n self.__checksums = {}\n if not os.path.exists(self.__checksums_file):\n return\n with open(self.__checksums_file) as fp:\n for line in fp:\n fname, dbtype, checksum = line.split(\":\")\n self.__checksums[fname.strip()] = {\n \"dbtype\": dbtype, \"checksum\": checksum.strip()\n }\n\n def __register_map_files(self):\n \"\"\"Load specified applications.\"\"\"\n responses = signals.register_postfix_maps.send(sender=self.__class__)\n mapfiles = []\n for response in responses:\n mapfiles += response[1]\n return mapfiles\n\n def __check_file(self, path):\n \"\"\"Check if map file has been modified.\"\"\"\n fname = os.path.basename(path)\n condition = (\n not self.__checksums or\n fname not in self.__checksums)\n if condition:\n return True\n with open(path) as fp:\n checksum = hashlib.md5(fp.read()).hexdigest()\n return checksum == self.__checksums[fname][\"checksum\"]\n\n def get_template(self, dbtype):\n \"\"\"Return map file template.\"\"\"\n tplcontent = MAP_FILE_TEMPLATE\n if dbtype == \"sqlite\":\n tplcontent += \"\"\"dbpath = {{ dbname }}\nquery = {{ query|safe }}\n\"\"\"\n else:\n tplcontent += \"\"\"user = {{ dbuser }}\npassword = {{ dbpass }}\ndbname = {{ dbname }}\nhosts = {{ dbhost }}\nquery = 
{{ query|safe }}\n\"\"\"\n return Template(tplcontent)\n\n def get_template_context(self, options):\n \"\"\"Build the context used to render templates.\"\"\"\n dburl = options.get(\"dburl\")\n db_settings = (\n dj_database_url.config(default=dburl)\n if dburl else settings.DATABASES[\"default\"])\n if \"sqlite\" in db_settings[\"ENGINE\"]:\n dbtype = \"sqlite\"\n elif \"psycopg2\" in db_settings[\"ENGINE\"]:\n dbtype = \"postgres\"\n else:\n dbtype = \"mysql\"\n commandline = \"{} {}\".format(\n os.path.basename(sys.argv[0]), \" \".join(sys.argv[1:]))\n context = {\n \"date\": timezone.now(),\n \"commandline\": commandline,\n \"dbtype\": dbtype,\n \"dbuser\": db_settings[\"USER\"],\n \"dbpass\": db_settings[\"PASSWORD\"],\n \"dbname\": db_settings[\"NAME\"],\n \"dbhost\": db_settings.get(\"HOST\", \"127.0.0.1\"),\n }\n return context\n\n def __render_map_file(\n self, mapobject, destdir, context, force_overwrite=False):\n \"\"\"Render a map file.\"\"\"\n fullpath = os.path.join(destdir, mapobject.filename)\n if os.path.exists(fullpath) and not force_overwrite:\n if not self.__check_file(fullpath):\n print(\n \"Cannot upgrade '{}' map because it has been modified.\"\n .format(mapobject.filename))\n return self.__checksums[mapobject.filename]\n mapcontent = utils.parse_map_file(fullpath)\n context = copy.deepcopy(context)\n context[\"dbtype\"] = self.__checksums[mapobject.filename][\"dbtype\"]\n if context[\"dbtype\"] == \"sqlite\":\n context[\"dbname\"] = mapcontent[\"dbpath\"]\n else:\n context[\"dbuser\"] = mapcontent[\"user\"]\n context[\"dbpass\"] = mapcontent[\"password\"]\n context[\"dbname\"] = mapcontent[\"dbname\"]\n context[\"dbhost\"] = mapcontent[\"hosts\"]\n content = self.get_template(context[\"dbtype\"]).render(\n Context(\n dict(context.items(),\n query=getattr(mapobject, context[\"dbtype\"]))\n )\n )\n fullpath = os.path.join(destdir, mapobject.filename)\n with open(fullpath, \"w\") as fp:\n fp.write(content)\n return hashlib.md5(content).hexdigest()\n\n def handle(self, *args, **options):\n \"\"\"Command entry point.\"\"\"\n mapfiles = self.__register_map_files()\n destdir = os.path.realpath(options[\"destdir\"])\n try:\n os.mkdir(destdir)\n except OSError:\n pass\n self.__load_checksums(destdir)\n context = self.get_template_context(options)\n checksums = {}\n for mapobject in mapfiles:\n checksum = self.__render_map_file(\n mapobject, destdir, context,\n force_overwrite=options[\"force_overwrite\"])\n checksums[mapobject.filename] = checksum\n with open(self.__checksums_file, \"w\") as fp:\n for fname, checksum in checksums.items():\n fp.write(\"{}:{}:{}\\n\".format(\n fname, context[\"dbtype\"], checksum))\n", "path": "modoboa/core/management/commands/generate_postfix_maps.py"}]} | 2,019 | 273 |
gh_patches_debug_75 | rasdani/github-patches | git_diff | kedro-org__kedro-2092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release Kedro `0.18.4`
### Depends on:
- Dataset issues
- Spaceflights tutorial documentation
- Open PRs related to datasets:
- [x] https://github.com/kedro-org/kedro/pull/2082
- [x] https://github.com/kedro-org/kedro/pull/1746
- [x] https://github.com/kedro-org/kedro/pull/1992
- [x] https://github.com/kedro-org/kedro/pull/1865
- [x] https://github.com/kedro-org/kedro/pull/1312
- [x] https://github.com/kedro-org/kedro/pull/1844
- [x] https://github.com/kedro-org/kedro/pull/1962
- [x] https://github.com/kedro-org/kedro/pull/1964
- [x] https://github.com/kedro-org/kedro/pull/1931
- [x] https://github.com/kedro-org/kedro/pull/1587
For the above PRs: if it's nearly finished, but the author isn't responding, we as a team can take over and finish the PR. If the PR still needs a lot of work and the author isn't responding, I suggest we close it and ask them to re-open in the new `kedro-datasets` repo.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/__init__.py`
Content:
```
1 """Kedro is a framework that makes it easy to build robust and scalable
2 data pipelines by providing uniform project templates, data abstraction,
3 configuration and pipeline assembly.
4 """
5
6 __version__ = "0.18.3"
7
8
9 import logging
10
11 logging.getLogger(__name__).addHandler(logging.NullHandler())
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/__init__.py b/kedro/__init__.py
--- a/kedro/__init__.py
+++ b/kedro/__init__.py
@@ -3,7 +3,7 @@
configuration and pipeline assembly.
"""
-__version__ = "0.18.3"
+__version__ = "0.18.4"
import logging
| {"golden_diff": "diff --git a/kedro/__init__.py b/kedro/__init__.py\n--- a/kedro/__init__.py\n+++ b/kedro/__init__.py\n@@ -3,7 +3,7 @@\n configuration and pipeline assembly.\n \"\"\"\n \n-__version__ = \"0.18.3\"\n+__version__ = \"0.18.4\"\n \n \n import logging\n", "issue": "Release Kedro `0.18.4`\n### Depends on:\n- Dataset issues\n- Spaceflights tutorial documentation\n- Open PRs related to datasets:\n - [x] https://github.com/kedro-org/kedro/pull/2082\n - [x] https://github.com/kedro-org/kedro/pull/1746\n - [x] https://github.com/kedro-org/kedro/pull/1992\n - [x] https://github.com/kedro-org/kedro/pull/1865\n - [x] https://github.com/kedro-org/kedro/pull/1312\n - [x] https://github.com/kedro-org/kedro/pull/1844\n - [x] https://github.com/kedro-org/kedro/pull/1962\n - [x] https://github.com/kedro-org/kedro/pull/1964\n - [x] https://github.com/kedro-org/kedro/pull/1931\n - [x] https://github.com/kedro-org/kedro/pull/1587\n\nFor the above PRs: if it's nearly finished, but the author isn't responding, we as a team can take over and finish the PR. If the PR still needs a lot of work and the author isn't responding, I suggest we close it and ask them to re-open in the new `kedro-datasets` repo. \n\n", "before_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.3\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}], "after_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.4\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}]} | 684 | 87 |
gh_patches_debug_16511 | rasdani/github-patches | git_diff | cupy__cupy-7405 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop support for Python 3.7, NumPy 1.20, and SciPy 1.6 on document and setup.py
#7405 has some CI issues, so we update the documentation and setup.py first for the next release.
--- END ISSUE ---
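Editor's note: the change requested here is pure packaging metadata. Below is a minimal, hypothetical sketch (not CuPy's actual build code; `MIN_PYTHON` and the upper range bound are illustrative assumptions) of deriving both `python_requires` and the trove classifiers from one constant, so the two cannot drift apart when a Python version is dropped:

```python
# Hypothetical helper: keep python_requires and the per-version classifiers
# in sync by deriving both from a single MIN_PYTHON constant.
MIN_PYTHON = (3, 8)

classifiers = [
    f"Programming Language :: Python :: 3.{minor}"
    for minor in range(MIN_PYTHON[1], 12)  # 3.8 .. 3.11
]

setup_kwargs = {
    "python_requires": f">={MIN_PYTHON[0]}.{MIN_PYTHON[1]}",
    "classifiers": classifiers,
}
print(setup_kwargs["python_requires"])  # >=3.8
print(classifiers[0])                   # Programming Language :: Python :: 3.8
```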
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import glob
4 import os
5 from setuptools import setup, find_packages
6 import sys
7
8 source_root = os.path.abspath(os.path.dirname(__file__))
9 sys.path.append(os.path.join(source_root, 'install'))
10
11 import cupy_builder # NOQA
12 from cupy_builder import cupy_setup_build # NOQA
13
14 ctx = cupy_builder.Context(source_root)
15 cupy_builder.initialize(ctx)
16 if not cupy_builder.preflight_check(ctx):
17 sys.exit(1)
18
19
20 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)
21 setup_requires = [
22 'Cython>=0.29.22,<3',
23 'fastrlock>=0.5',
24 ]
25 install_requires = [
26 'numpy>=1.20,<1.27', # see #4773
27 'fastrlock>=0.5',
28 ]
29 extras_require = {
30 'all': [
31 'scipy>=1.6,<1.12', # see #4773
32 'Cython>=0.29.22,<3',
33 'optuna>=2.0',
34 ],
35 # TODO(kmaehashi): remove stylecheck and update the contribution guide
36 'stylecheck': [
37 'autopep8==1.5.5',
38 'flake8==3.8.4',
39 'pbr==5.5.1',
40 'pycodestyle==2.6.0',
41
42 'mypy==0.950',
43 'types-setuptools==57.4.14',
44 ],
45 'test': [
46 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.
47 # pytest < 7.2 has some different behavior that makes our CI fail
48 'pytest>=7.2',
49 'hypothesis>=6.37.2,<6.55.0',
50 ],
51 }
52 tests_require = extras_require['test']
53
54
55 # List of files that needs to be in the distribution (sdist/wheel).
56 # Notes:
57 # - Files only needed in sdist should be added to `MANIFEST.in`.
58 # - The following glob (`**`) ignores items starting with `.`.
59 cupy_package_data = [
60 'cupy/cuda/cupy_thrust.cu',
61 'cupy/cuda/cupy_cub.cu',
62 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback
63 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback
64 'cupy/cuda/cupy_cufft.h', # for cuFFT callback
65 'cupy/cuda/cufft.pxd', # for cuFFT callback
66 'cupy/cuda/cufft.pyx', # for cuFFT callback
67 'cupy/random/cupy_distributions.cu',
68 'cupy/random/cupy_distributions.cuh',
69 ] + [
70 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)
71 if os.path.isfile(x)
72 ]
73
74 package_data = {
75 'cupy': [
76 os.path.relpath(x, 'cupy') for x in cupy_package_data
77 ],
78 }
79
80 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)
81
82
83 if len(sys.argv) < 2 or sys.argv[1] == 'egg_info':
84 # Extensions are unnecessary for egg_info generation as all sources files
85 # can be enumerated via MANIFEST.in.
86 ext_modules = []
87 else:
88 ext_modules = cupy_setup_build.get_ext_modules(True, ctx)
89
90
91 # Get __version__ variable
92 with open(os.path.join(source_root, 'cupy', '_version.py')) as f:
93 exec(f.read())
94
95 long_description = None
96 if ctx.long_description_path is not None:
97 with open(ctx.long_description_path) as f:
98 long_description = f.read()
99
100
101 CLASSIFIERS = """\
102 Development Status :: 5 - Production/Stable
103 Intended Audience :: Science/Research
104 Intended Audience :: Developers
105 License :: OSI Approved :: MIT License
106 Programming Language :: Python
107 Programming Language :: Python :: 3
108 Programming Language :: Python :: 3.7
109 Programming Language :: Python :: 3.8
110 Programming Language :: Python :: 3.9
111 Programming Language :: Python :: 3.10
112 Programming Language :: Python :: 3.11
113 Programming Language :: Python :: 3 :: Only
114 Programming Language :: Cython
115 Topic :: Software Development
116 Topic :: Scientific/Engineering
117 Operating System :: POSIX
118 Operating System :: Microsoft :: Windows
119 """
120
121
122 setup(
123 name=ctx.package_name,
124 version=__version__, # NOQA
125 description='CuPy: NumPy & SciPy for GPU',
126 long_description=long_description,
127 author='Seiya Tokui',
128 author_email='[email protected]',
129 maintainer='CuPy Developers',
130 url='https://cupy.dev/',
131 license='MIT License',
132 project_urls={
133 "Bug Tracker": "https://github.com/cupy/cupy/issues",
134 "Documentation": "https://docs.cupy.dev/",
135 "Source Code": "https://github.com/cupy/cupy",
136 },
137 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
138 packages=find_packages(exclude=['install', 'tests']),
139 package_data=package_data,
140 zip_safe=False,
141 python_requires='>=3.7',
142 setup_requires=setup_requires,
143 install_requires=install_requires,
144 tests_require=tests_require,
145 extras_require=extras_require,
146 ext_modules=ext_modules,
147 cmdclass={'build_ext': cupy_builder._command.custom_build_ext},
148 )
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,6 @@
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
@@ -138,7 +137,7 @@
packages=find_packages(exclude=['install', 'tests']),
package_data=package_data,
zip_safe=False,
- python_requires='>=3.7',
+ python_requires='>=3.8',
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,7 +105,6 @@\n License :: OSI Approved :: MIT License\n Programming Language :: Python\n Programming Language :: Python :: 3\n-Programming Language :: Python :: 3.7\n Programming Language :: Python :: 3.8\n Programming Language :: Python :: 3.9\n Programming Language :: Python :: 3.10\n@@ -138,7 +137,7 @@\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n- python_requires='>=3.7',\n+ python_requires='>=3.8',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n", "issue": "Drop support for Python 3.7, NumPy 1.20, and SciPy 1.6 on document and setup.py\n#7405 has some CI issues, so we update the documentation and setup.py first for the next release.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.20,<1.27', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.6,<1.12', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n # TODO(kmaehashi): remove stylecheck and update the contribution guide\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n\n 'mypy==0.950',\n 'types-setuptools==57.4.14',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n # pytest < 7.2 has some different behavior that makes our CI fail\n 'pytest>=7.2',\n 'hypothesis>=6.37.2,<6.55.0',\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\n\nif len(sys.argv) < 2 or sys.argv[1] == 'egg_info':\n # Extensions are unnecessary for egg_info generation as all sources files\n # can be enumerated via MANIFEST.in.\n ext_modules = []\nelse:\n ext_modules = cupy_setup_build.get_ext_modules(True, ctx)\n\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = 
\"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3.10\nProgramming Language :: Python :: 3.11\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': cupy_builder._command.custom_build_ext},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.20,<1.27', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.6,<1.12', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n # TODO(kmaehashi): remove stylecheck and update the contribution guide\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n\n 'mypy==0.950',\n 'types-setuptools==57.4.14',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n # pytest < 7.2 has some different behavior that makes our CI fail\n 'pytest>=7.2',\n 'hypothesis>=6.37.2,<6.55.0',\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if 
os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\n\nif len(sys.argv) < 2 or sys.argv[1] == 'egg_info':\n # Extensions are unnecessary for egg_info generation as all sources files\n # can be enumerated via MANIFEST.in.\n ext_modules = []\nelse:\n ext_modules = cupy_setup_build.get_ext_modules(True, ctx)\n\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3.10\nProgramming Language :: Python :: 3.11\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.8',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': cupy_builder._command.custom_build_ext},\n)\n", "path": "setup.py"}]} | 1,903 | 176 |
gh_patches_debug_13872 | rasdani/github-patches | git_diff | LMFDB__lmfdb-4241 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad links on HMF browse pages
A report from the bug report system, from Paul Gunnells:
"On
https://www.lmfdb.org/ModularForm/GL2/TotallyReal/browse/2/
the links in the middle column (the Number of newforms column) are all the same and point to https://www.lmfdb.org/ModularForm/GL2/TotallyReal/?field_label=2.2.497.1 instead of the URLs for the modular form data they're supposed to point to."
--- END ISSUE ---
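Editor's note: the symptom (every link pointing at the same field label) is the classic signature of a late-binding closure created in a loop or comprehension. A self-contained sketch, with made-up labels and URL pattern for illustration:

```python
# All lambdas close over the same comprehension variable F, so they all
# see its final value once the comprehension has finished.
make_url = {F: (lambda: f"/forms?field_label={F}")
            for F in ["2.2.5.1", "2.2.8.1", "2.2.497.1"]}
print({k: f() for k, f in make_url.items()})
# every value is '/forms?field_label=2.2.497.1'

# Binding the current value, e.g. as a default argument, captures it:
make_url = {F: (lambda F=F: f"/forms?field_label={F}")
            for F in ["2.2.5.1", "2.2.8.1", "2.2.497.1"]}
print({k: f() for k, f in make_url.items()})
# now each value carries its own field label
```

An equivalent cure is to give the lambda a parameter so the caller supplies the label explicitly, which avoids capturing the loop variable at all.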
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/hilbert_modular_forms/hmf_stats.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from flask import url_for
3 from lmfdb import db
4 from lmfdb.utils import comma
5 from lmfdb.utils.display_stats import StatsDisplay, proportioners, totaler
6 from lmfdb.logger import make_logger
7 from lmfdb.number_fields.web_number_field import nf_display_knowl
8 from sage.misc.cachefunc import cached_method
9
10 logger = make_logger("hmf")
11
12 class HMFstats(StatsDisplay):
13 """
14 Class for creating and displaying statistics for Hilbert modular forms
15 """
16 def __init__(self):
17 self.nforms = db.hmf_forms.count()
18
19 table = db.hmf_forms
20 baseurl_func = ".hilbert_modular_form_render_webpage"
21
22 stat_list = [
23 {'cols': ['level_norm', 'deg'],
24 'totaler': totaler(),
25 'proportioner': proportioners.per_col_total},
26 {'cols': ['level_norm', 'dimension'],
27 'totaler': totaler(),
28 'proportioner': proportioners.per_col_total},
29 ]
30 buckets = {'level_norm': ['1', '2-10', '11-100', '101-1000', '1001-10000'],
31 'dimension': ['1', '2', '3', '4', '5-10', '11-20', '21-100', '101-1000']}
32 knowls = {'level_norm': 'mf.hilbert.level_norm',
33 'dimension': 'mf.hilbert.dimension',
34 'deg': 'nf.degree'}
35 short_display = {'deg': 'degree'}
36
37 @property
38 def short_summary(self):
39 return self.summary + " Here are some <a href='%s'>further statistics</a>." % (url_for(".statistics"),)
40
41 @property
42 def summary(self):
43 hmf_knowl = '<a knowl="mf.hilbert">Hilbert modular forms</a>'
44 nf_knowl = '<a knowl="nf.totally_real">totally real number fields</a>'
45 deg_knowl = '<a knowl="nf.degree">degree</a>'
46 return "The database currently contains %s %s over %s %s of %s 2 to %s." % (comma(self.nforms), hmf_knowl, self.counts()["nfields"], nf_knowl, deg_knowl, self.counts()["maxdeg"])
47
48 def degree_summary(self, d):
49 stats = self.statistics(d)
50 hmf_knowl = '<a knowl="mf.hilbert">Hilbert modular forms</a>'
51 nf_knowl = '<a knowl="nf.totally_real">totally real number fields</a>'
52 deg_knowl = '<a knowl="nf.degree">degree</a>'
53 level_knowl = '<a knowl="mf.hilbert.level_norm">level norm</a>'
54 return ''.join([r'The database currently contains %s ' % stats['nforms'],
55 hmf_knowl,
56 r' defined over %s ' % stats['nfields'],
57 nf_knowl,
58 r' of %s %s, with ' % (deg_knowl, d),
59 level_knowl,
60 r' up to %s.' % stats['maxnorm']])
61
62 @cached_method
63 def counts(self):
64 counts = {}
65
66
67 counts['nforms'] = self.nforms
68 counts['nforms_c'] = comma(self.nforms)
69
70 attrs = ["degree", "discriminant", "label"]
71 fields = list(db.hmf_fields.search({}, attrs, sort=attrs))
72 degrees = sorted(set(F["degree"] for F in fields))
73 by_deg = {d: [F for F in fields if F["degree"] == d] for d in degrees}
74 counts["degrees"] = degrees
75 counts["nfields"] = len(fields)
76 counts["nfields_c"] = comma(len(fields))
77 counts["maxdeg"] = max(degrees)
78 counts["max_deg_c"] = comma(max(degrees))
79 counts["fields_by_degree"] = {d : [F["label"] for F in by_deg[d]] for d in degrees}
80 counts["nfields_by_degree"] = {d : len(by_deg[d]) for d in degrees}
81 counts["max_disc_by_degree"] = {d : max(F["discriminant"] for F in by_deg[d]) for d in degrees}
82 return counts
83
84 @cached_method
85 def statistics(self, d=None):
86 if d is not None:
87 return self.statistics()[int(d)]
88 nstats = db.hmf_forms.stats.numstats("level_norm", "field_label")
89 counts = db.hmf_forms.stats.column_counts("field_label")
90 nstats_by_deg = db.hmf_forms.stats.numstats("level_norm", "deg")
91 counts_by_deg = db.hmf_forms.stats.column_counts("deg")
92 C = self.counts()
93 stats = {d: {"fields": C["fields_by_degree"][d],
94 "nfields": C["nfields_by_degree"][d],
95 "nforms": counts_by_deg[d],
96 "maxnorm": nstats_by_deg[d]["max"],
97 "counts": {F: {"nforms": counts[F],
98 "maxnorm": nstats[F]["max"],
99 "field_knowl": nf_display_knowl(F, F),
100 "forms": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}
101 for F in C["fields_by_degree"][d]}}
102 for d in C["degrees"]}
103 return stats
104
105 def setup(self, attributes=None, delete=False):
106 if attributes is None:
107 # Per-degree statistics aren't updated by the normal setup function
108 # The assert is for pyflakes
109 assert self.statistics()
110 super().setup(attributes, delete)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfdb/hilbert_modular_forms/hmf_stats.py b/lmfdb/hilbert_modular_forms/hmf_stats.py
--- a/lmfdb/hilbert_modular_forms/hmf_stats.py
+++ b/lmfdb/hilbert_modular_forms/hmf_stats.py
@@ -97,7 +97,7 @@
"counts": {F: {"nforms": counts[F],
"maxnorm": nstats[F]["max"],
"field_knowl": nf_display_knowl(F, F),
- "forms": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}
+ "forms": lambda label: url_for('hmf.hilbert_modular_form_render_webpage', field_label=label)}
for F in C["fields_by_degree"][d]}}
for d in C["degrees"]}
return stats
| {"golden_diff": "diff --git a/lmfdb/hilbert_modular_forms/hmf_stats.py b/lmfdb/hilbert_modular_forms/hmf_stats.py\n--- a/lmfdb/hilbert_modular_forms/hmf_stats.py\n+++ b/lmfdb/hilbert_modular_forms/hmf_stats.py\n@@ -97,7 +97,7 @@\n \"counts\": {F: {\"nforms\": counts[F],\n \"maxnorm\": nstats[F][\"max\"],\n \"field_knowl\": nf_display_knowl(F, F),\n- \"forms\": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}\n+ \"forms\": lambda label: url_for('hmf.hilbert_modular_form_render_webpage', field_label=label)}\n for F in C[\"fields_by_degree\"][d]}}\n for d in C[\"degrees\"]}\n return stats\n", "issue": "Bad links on HMF browse pages\nA report from the bug report system, from Paul Gunnells: \r\n\r\n\"On \r\n\r\nhttps://www.lmfdb.org/ModularForm/GL2/TotallyReal/browse/2/ \r\n\r\nthe links in the middle column (the Number of newforms column) are all the same and point to https://www.lmfdb.org/ModularForm/GL2/TotallyReal/?field_label=2.2.497.1 instead of urls the modular form data they're supposed to.\"\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom flask import url_for\nfrom lmfdb import db\nfrom lmfdb.utils import comma\nfrom lmfdb.utils.display_stats import StatsDisplay, proportioners, totaler\nfrom lmfdb.logger import make_logger\nfrom lmfdb.number_fields.web_number_field import nf_display_knowl\nfrom sage.misc.cachefunc import cached_method\n\nlogger = make_logger(\"hmf\")\n\nclass HMFstats(StatsDisplay):\n \"\"\"\n Class for creating and displaying statistics for Hilbert modular forms\n \"\"\"\n def __init__(self):\n self.nforms = db.hmf_forms.count()\n\n table = db.hmf_forms\n baseurl_func = \".hilbert_modular_form_render_webpage\"\n\n stat_list = [\n {'cols': ['level_norm', 'deg'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n {'cols': ['level_norm', 'dimension'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n ]\n buckets = {'level_norm': ['1', '2-10', '11-100', '101-1000', '1001-10000'],\n 'dimension': ['1', '2', '3', '4', '5-10', '11-20', '21-100', '101-1000']}\n knowls = {'level_norm': 'mf.hilbert.level_norm',\n 'dimension': 'mf.hilbert.dimension',\n 'deg': 'nf.degree'}\n short_display = {'deg': 'degree'}\n\n @property\n def short_summary(self):\n return self.summary + \" Here are some <a href='%s'>further statistics</a>.\" % (url_for(\".statistics\"),)\n\n @property\n def summary(self):\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n return \"The database currently contains %s %s over %s %s of %s 2 to %s.\" % (comma(self.nforms), hmf_knowl, self.counts()[\"nfields\"], nf_knowl, deg_knowl, self.counts()[\"maxdeg\"])\n\n def degree_summary(self, d):\n stats = self.statistics(d)\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n level_knowl = '<a knowl=\"mf.hilbert.level_norm\">level norm</a>'\n return ''.join([r'The database currently contains %s ' % stats['nforms'],\n hmf_knowl,\n r' defined over %s ' % stats['nfields'],\n nf_knowl,\n r' of %s %s, with ' % (deg_knowl, d),\n level_knowl,\n r' up to %s.' 
% stats['maxnorm']])\n\n @cached_method\n def counts(self):\n counts = {}\n\n\n counts['nforms'] = self.nforms\n counts['nforms_c'] = comma(self.nforms)\n\n attrs = [\"degree\", \"discriminant\", \"label\"]\n fields = list(db.hmf_fields.search({}, attrs, sort=attrs))\n degrees = sorted(set(F[\"degree\"] for F in fields))\n by_deg = {d: [F for F in fields if F[\"degree\"] == d] for d in degrees}\n counts[\"degrees\"] = degrees\n counts[\"nfields\"] = len(fields)\n counts[\"nfields_c\"] = comma(len(fields))\n counts[\"maxdeg\"] = max(degrees)\n counts[\"max_deg_c\"] = comma(max(degrees))\n counts[\"fields_by_degree\"] = {d : [F[\"label\"] for F in by_deg[d]] for d in degrees}\n counts[\"nfields_by_degree\"] = {d : len(by_deg[d]) for d in degrees}\n counts[\"max_disc_by_degree\"] = {d : max(F[\"discriminant\"] for F in by_deg[d]) for d in degrees}\n return counts\n\n @cached_method\n def statistics(self, d=None):\n if d is not None:\n return self.statistics()[int(d)]\n nstats = db.hmf_forms.stats.numstats(\"level_norm\", \"field_label\")\n counts = db.hmf_forms.stats.column_counts(\"field_label\")\n nstats_by_deg = db.hmf_forms.stats.numstats(\"level_norm\", \"deg\")\n counts_by_deg = db.hmf_forms.stats.column_counts(\"deg\")\n C = self.counts()\n stats = {d: {\"fields\": C[\"fields_by_degree\"][d],\n \"nfields\": C[\"nfields_by_degree\"][d],\n \"nforms\": counts_by_deg[d],\n \"maxnorm\": nstats_by_deg[d][\"max\"],\n \"counts\": {F: {\"nforms\": counts[F],\n \"maxnorm\": nstats[F][\"max\"],\n \"field_knowl\": nf_display_knowl(F, F),\n \"forms\": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}\n for F in C[\"fields_by_degree\"][d]}}\n for d in C[\"degrees\"]}\n return stats\n\n def setup(self, attributes=None, delete=False):\n if attributes is None:\n # Per-degree statistics aren't updated by the normal setup function\n # The assert is for pyflakes\n assert self.statistics()\n super().setup(attributes, delete)\n", "path": "lmfdb/hilbert_modular_forms/hmf_stats.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom flask import url_for\nfrom lmfdb import db\nfrom lmfdb.utils import comma\nfrom lmfdb.utils.display_stats import StatsDisplay, proportioners, totaler\nfrom lmfdb.logger import make_logger\nfrom lmfdb.number_fields.web_number_field import nf_display_knowl\nfrom sage.misc.cachefunc import cached_method\n\nlogger = make_logger(\"hmf\")\n\nclass HMFstats(StatsDisplay):\n \"\"\"\n Class for creating and displaying statistics for Hilbert modular forms\n \"\"\"\n def __init__(self):\n self.nforms = db.hmf_forms.count()\n\n table = db.hmf_forms\n baseurl_func = \".hilbert_modular_form_render_webpage\"\n\n stat_list = [\n {'cols': ['level_norm', 'deg'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n {'cols': ['level_norm', 'dimension'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n ]\n buckets = {'level_norm': ['1', '2-10', '11-100', '101-1000', '1001-10000'],\n 'dimension': ['1', '2', '3', '4', '5-10', '11-20', '21-100', '101-1000']}\n knowls = {'level_norm': 'mf.hilbert.level_norm',\n 'dimension': 'mf.hilbert.dimension',\n 'deg': 'nf.degree'}\n short_display = {'deg': 'degree'}\n\n @property\n def short_summary(self):\n return self.summary + \" Here are some <a href='%s'>further statistics</a>.\" % (url_for(\".statistics\"),)\n\n @property\n def summary(self):\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number 
fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n return \"The database currently contains %s %s over %s %s of %s 2 to %s.\" % (comma(self.nforms), hmf_knowl, self.counts()[\"nfields\"], nf_knowl, deg_knowl, self.counts()[\"maxdeg\"])\n\n def degree_summary(self, d):\n stats = self.statistics(d)\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n level_knowl = '<a knowl=\"mf.hilbert.level_norm\">level norm</a>'\n return ''.join([r'The database currently contains %s ' % stats['nforms'],\n hmf_knowl,\n r' defined over %s ' % stats['nfields'],\n nf_knowl,\n r' of %s %s, with ' % (deg_knowl, d),\n level_knowl,\n r' up to %s.' % stats['maxnorm']])\n\n @cached_method\n def counts(self):\n counts = {}\n\n\n counts['nforms'] = self.nforms\n counts['nforms_c'] = comma(self.nforms)\n\n attrs = [\"degree\", \"discriminant\", \"label\"]\n fields = list(db.hmf_fields.search({}, attrs, sort=attrs))\n degrees = sorted(set(F[\"degree\"] for F in fields))\n by_deg = {d: [F for F in fields if F[\"degree\"] == d] for d in degrees}\n counts[\"degrees\"] = degrees\n counts[\"nfields\"] = len(fields)\n counts[\"nfields_c\"] = comma(len(fields))\n counts[\"maxdeg\"] = max(degrees)\n counts[\"max_deg_c\"] = comma(max(degrees))\n counts[\"fields_by_degree\"] = {d : [F[\"label\"] for F in by_deg[d]] for d in degrees}\n counts[\"nfields_by_degree\"] = {d : len(by_deg[d]) for d in degrees}\n counts[\"max_disc_by_degree\"] = {d : max(F[\"discriminant\"] for F in by_deg[d]) for d in degrees}\n return counts\n\n @cached_method\n def statistics(self, d=None):\n if d is not None:\n return self.statistics()[int(d)]\n nstats = db.hmf_forms.stats.numstats(\"level_norm\", \"field_label\")\n counts = db.hmf_forms.stats.column_counts(\"field_label\")\n nstats_by_deg = db.hmf_forms.stats.numstats(\"level_norm\", \"deg\")\n counts_by_deg = db.hmf_forms.stats.column_counts(\"deg\")\n C = self.counts()\n stats = {d: {\"fields\": C[\"fields_by_degree\"][d],\n \"nfields\": C[\"nfields_by_degree\"][d],\n \"nforms\": counts_by_deg[d],\n \"maxnorm\": nstats_by_deg[d][\"max\"],\n \"counts\": {F: {\"nforms\": counts[F],\n \"maxnorm\": nstats[F][\"max\"],\n \"field_knowl\": nf_display_knowl(F, F),\n \"forms\": lambda label: url_for('hmf.hilbert_modular_form_render_webpage', field_label=label)}\n for F in C[\"fields_by_degree\"][d]}}\n for d in C[\"degrees\"]}\n return stats\n\n def setup(self, attributes=None, delete=False):\n if attributes is None:\n # Per-degree statistics aren't updated by the normal setup function\n # The assert is for pyflakes\n assert self.statistics()\n super().setup(attributes, delete)\n", "path": "lmfdb/hilbert_modular_forms/hmf_stats.py"}]} | 1,900 | 195 |
gh_patches_debug_62231 | rasdani/github-patches | git_diff | obspy__obspy-1673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parsing SEED: 'Date is required.' Warning
Hi,
Each time I want to read a dataless file with different periods of time, I get this annoying warning message:
```
from obspy.io.xseed import Parser
from obspy import UTCDateTime
Parser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed')
/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required. warnings.warn('Date is required.', UserWarning)
```
Is there a nice way to avoid this warning? I tried the following, but it is not working:

```python
from obspy.io.xseed import Parser
from obspy import UTCDateTime
Parser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed').get_paz('G.CAN.00.BHZ', datetime=UTCDateTime())
```
and the result is
```
/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required.
warnings.warn('Date is required.', UserWarning)
Out[1]:
{u'digitizer_gain': 1677720.0,
u'gain': 1.24658e+17,
u'poles': [(-0.0120768+0.011706j),
(-0.0120768-0.011706j),
(-36.4684+66.8452j),
(-36.4684-66.8452j),
(-29.8656+380.54j),
(-29.8656-380.54j),
(-12145.6+0j),
(-12145.6+0j)],
u'seismometer_gain': 3450.0,
u'sensitivity': 5788280000.0,
u'zeros': [0j, 0j]}
```
--- END ISSUE ---
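Editor's note: independent of the underlying blockette definition, callers can silence just this one message with the standard-library warnings machinery. A workaround sketch, assuming the same dataless URL as in the report (this suppresses the symptom only, it does not fix the field definition):

```python
import warnings

from obspy.io.xseed import Parser

# Ignore only this specific UserWarning while parsing; any other warning
# still surfaces normally.
with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore", message="Date is required.", category=UserWarning)
    parser = Parser("http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed")
```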
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/io/xseed/blockette/blockette051.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4 from future.builtins import * # NOQA
5
6 from .blockette import Blockette
7 from ..fields import Integer, VariableString
8
9
10 class Blockette051(Blockette):
11 """
12 Blockette 051: Station Comment Blockette.
13
14 Sample:
15 05100351992,001~1992,002~0740000000
16 """
17
18 id = 51
19 name = "Station Comment"
20 fields = [
21 VariableString(3, "Beginning effective time", 1, 22, 'T'),
22 VariableString(4, "End effective time", 1, 22, 'T', optional=True),
23 Integer(5, "Comment code key", 4, xpath=31),
24 Integer(6, "Comment level", 6, ignore=True)
25 ]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/io/xseed/blockette/blockette051.py b/obspy/io/xseed/blockette/blockette051.py
--- a/obspy/io/xseed/blockette/blockette051.py
+++ b/obspy/io/xseed/blockette/blockette051.py
@@ -19,7 +19,7 @@
name = "Station Comment"
fields = [
VariableString(3, "Beginning effective time", 1, 22, 'T'),
- VariableString(4, "End effective time", 1, 22, 'T', optional=True),
+ VariableString(4, "End effective time", 0, 22, 'T', optional=True),
Integer(5, "Comment code key", 4, xpath=31),
Integer(6, "Comment level", 6, ignore=True)
]
| {"golden_diff": "diff --git a/obspy/io/xseed/blockette/blockette051.py b/obspy/io/xseed/blockette/blockette051.py\n--- a/obspy/io/xseed/blockette/blockette051.py\n+++ b/obspy/io/xseed/blockette/blockette051.py\n@@ -19,7 +19,7 @@\n name = \"Station Comment\"\n fields = [\n VariableString(3, \"Beginning effective time\", 1, 22, 'T'),\n- VariableString(4, \"End effective time\", 1, 22, 'T', optional=True),\n+ VariableString(4, \"End effective time\", 0, 22, 'T', optional=True),\n Integer(5, \"Comment code key\", 4, xpath=31),\n Integer(6, \"Comment level\", 6, ignore=True)\n ]\n", "issue": "Parsing SEED: 'Date is required.' Warning\nHi,\n\nEach time I want to read a dataless with different periods of time, I have this annoying warning message:\n\n```\nfrom obspy.io.xseed import Parser\nfrom obspy import UTCDateTime\nParser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed')\n/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required. warnings.warn('Date is required.', UserWarning)\n```\n\nIs there a nice way to avoid this warning ? I try that but it is not working\n\n``` code\nfrom obspy.io.xseed import Parser\nfrom obspy import UTCDateTime\nParser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed').get_paz('G.CAN.00.BHZ', datetime=UTCDateTime())\n\n```\n\nand the result is\n\n```\n/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required.\n warnings.warn('Date is required.', UserWarning)\nOut[1]:\n{u'digitizer_gain': 1677720.0,\n u'gain': 1.24658e+17,\n u'poles': [(-0.0120768+0.011706j),\n (-0.0120768-0.011706j),\n (-36.4684+66.8452j),\n (-36.4684-66.8452j),\n (-29.8656+380.54j),\n (-29.8656-380.54j),\n (-12145.6+0j),\n (-12145.6+0j)],\n u'seismometer_gain': 3450.0,\n u'sensitivity': 5788280000.0,\n u'zeros': [0j, 0j]}\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom .blockette import Blockette\nfrom ..fields import Integer, VariableString\n\n\nclass Blockette051(Blockette):\n \"\"\"\n Blockette 051: Station Comment Blockette.\n\n Sample:\n 05100351992,001~1992,002~0740000000\n \"\"\"\n\n id = 51\n name = \"Station Comment\"\n fields = [\n VariableString(3, \"Beginning effective time\", 1, 22, 'T'),\n VariableString(4, \"End effective time\", 1, 22, 'T', optional=True),\n Integer(5, \"Comment code key\", 4, xpath=31),\n Integer(6, \"Comment level\", 6, ignore=True)\n ]\n", "path": "obspy/io/xseed/blockette/blockette051.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom .blockette import Blockette\nfrom ..fields import Integer, VariableString\n\n\nclass Blockette051(Blockette):\n \"\"\"\n Blockette 051: Station Comment Blockette.\n\n Sample:\n 05100351992,001~1992,002~0740000000\n \"\"\"\n\n id = 51\n name = \"Station Comment\"\n fields = [\n VariableString(3, \"Beginning effective time\", 1, 22, 'T'),\n VariableString(4, \"End effective time\", 0, 22, 'T', optional=True),\n Integer(5, \"Comment code key\", 4, xpath=31),\n Integer(6, \"Comment level\", 6, ignore=True)\n ]\n", "path": "obspy/io/xseed/blockette/blockette051.py"}]} | 1,007 | 197 |
gh_patches_debug_12780 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2017 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E7003 Errors when using Fn::Transform inside a Mapping
*cfn-lint version: 0.49.2*
*Description of issue.*
#2006 tightened what is considered valid for use in a Mapping. This causes it to reject what otherwise appears to be a valid use of `Fn::Transform` as the body of a Mapping.
For example, this snippet is valid CFN:
```yaml
Mappings:
AwsAgentPlatformMap:
Fn::Transform:
Name: AWS::Include
Parameters:
Location: s3://my-bucket-name/version/3.0.1/amazonlinux2/a-json-file.json
```
This usage trips the newly enhanced regex:
```
E7003 Mapping key (Fn::Transform) has invalid name. Name has to be alphanumeric, '-' or '.'
```
--- END ISSUE ---
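Editor's note: the pattern `^[a-zA-Z0-9.-]{1,255}$` can never match `Fn::Transform` because of the colons, so the rule needs a special case rather than a looser regex. A standalone sketch of that check (`is_valid_mapping_key` is a hypothetical helper for illustration, not cfn-lint API):

```python
import re

def is_valid_mapping_key(key):
    # Accept the special "Fn::Transform" key as well as names matching the
    # rule's alphanumeric/'-'/'.' pattern.
    if key == "Fn::Transform":
        return True
    return re.match(r"^[a-zA-Z0-9.-]{1,255}$", key) is not None

print(is_valid_mapping_key("AwsAgentPlatformMap"))  # True
print(is_valid_mapping_key("Fn::Transform"))        # True
print(is_valid_mapping_key("Bad Key!"))             # False
```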
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/mappings/KeyName.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 import six
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9 from cfnlint.helpers import REGEX_ALPHANUMERIC
10
11
12 class KeyName(CloudFormationLintRule):
13 """Check if Mapping Keys are type string"""
14 id = 'E7003'
15 shortdesc = 'Mapping keys are strings and alphanumeric'
16 description = 'Check if Mappings keys are properly typed as strings and alphanumeric'
17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'
18 tags = ['mappings']
19
20 def check_attribute(self, key, path):
21 """ Check the key name for string and alphanumeric"""
22 matches = []
23 if not isinstance(key, six.string_types):
24 message = 'Mapping attribute ({0}) has to be a string.'
25 matches.append(RuleMatch(path[:], message.format(key)))
26 elif not re.match(REGEX_ALPHANUMERIC, key):
27 message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'
28 matches.append(RuleMatch(path[:], message.format(key)))
29
30 return matches
31
32 def check_key(self, key, path):
33 """ Check the key name for string and alphanumeric"""
34 matches = []
35 if not isinstance(key, six.string_types):
36 message = 'Mapping key ({0}) has to be a string.'
37 matches.append(RuleMatch(path[:], message.format(key)))
38 elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):
39 message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \'-\' or \'.\''
40 matches.append(RuleMatch(path[:], message.format(key)))
41
42 return matches
43
44 def match(self, cfn):
45 matches = []
46
47 mappings = cfn.template.get('Mappings', {})
48 for mapping_name, mapping_value in mappings.items():
49 if isinstance(mapping_value, dict):
50 for key_name, key_value in mapping_value.items():
51 matches.extend(self.check_key(
52 key_name, ['Mappings', mapping_name, key_name]))
53 if isinstance(key_value, dict):
54 for sub_key_name, _ in key_value.items():
55 matches.extend(
56 self.check_attribute(
57 sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))
58
59 return matches
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py
--- a/src/cfnlint/rules/mappings/KeyName.py
+++ b/src/cfnlint/rules/mappings/KeyName.py
@@ -35,7 +35,7 @@
if not isinstance(key, six.string_types):
message = 'Mapping key ({0}) has to be a string.'
matches.append(RuleMatch(path[:], message.format(key)))
- elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):
+ elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key) and key != 'Fn::Transform':
message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \'-\' or \'.\''
matches.append(RuleMatch(path[:], message.format(key)))
| {"golden_diff": "diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py\n--- a/src/cfnlint/rules/mappings/KeyName.py\n+++ b/src/cfnlint/rules/mappings/KeyName.py\n@@ -35,7 +35,7 @@\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n- elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):\n+ elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key) and key != 'Fn::Transform':\n message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n", "issue": "E7003 Errors when using Fn::Transform inside a Mapping\n*cfn-lint version: 0.49.2*\r\n\r\n*Description of issue.*\r\n#2006 tightened what is considered valid for use in a Mapping. This causes it to reject what otherwise appears to be a valid use of `Fn::Transform` as the body of a Mapping.\r\n\r\nFor example, this snippet is valid CFN:\r\n\r\n```yaml\r\nMappings:\r\n AwsAgentPlatformMap:\r\n Fn::Transform:\r\n Name: AWS::Include\r\n Parameters:\r\n Location: s3://my-bucket-name/version/3.0.1/amazonlinux2/a-json-file.json\r\n```\r\n\r\nThis usage trips the newly enhanced regex:\r\n\r\n```\r\nE7003 Mapping key (Fn::Transform) has invalid name. Name has to be alphanumeric, '-' or '.'\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_ALPHANUMERIC\n\n\nclass KeyName(CloudFormationLintRule):\n \"\"\"Check if Mapping Keys are type string\"\"\"\n id = 'E7003'\n shortdesc = 'Mapping keys are strings and alphanumeric'\n description = 'Check if Mappings keys are properly typed as strings and alphanumeric'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def check_attribute(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping attribute ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match(REGEX_ALPHANUMERIC, key):\n message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def check_key(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):\n message = 'Mapping key ({0}) has invalid name. 
Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n for mapping_name, mapping_value in mappings.items():\n if isinstance(mapping_value, dict):\n for key_name, key_value in mapping_value.items():\n matches.extend(self.check_key(\n key_name, ['Mappings', mapping_name, key_name]))\n if isinstance(key_value, dict):\n for sub_key_name, _ in key_value.items():\n matches.extend(\n self.check_attribute(\n sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/KeyName.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_ALPHANUMERIC\n\n\nclass KeyName(CloudFormationLintRule):\n \"\"\"Check if Mapping Keys are type string\"\"\"\n id = 'E7003'\n shortdesc = 'Mapping keys are strings and alphanumeric'\n description = 'Check if Mappings keys are properly typed as strings and alphanumeric'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def check_attribute(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping attribute ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match(REGEX_ALPHANUMERIC, key):\n message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def check_key(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key) and key != 'Fn::Transform':\n message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n for mapping_name, mapping_value in mappings.items():\n if isinstance(mapping_value, dict):\n for key_name, key_value in mapping_value.items():\n matches.extend(self.check_key(\n key_name, ['Mappings', mapping_name, key_name]))\n if isinstance(key_value, dict):\n for sub_key_name, _ in key_value.items():\n matches.extend(\n self.check_attribute(\n sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/KeyName.py"}]} | 1,089 | 200 |
gh_patches_debug_26751 | rasdani/github-patches | git_diff | blaze__blaze-1196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Blaze server yaml file error
Testing the Blaze server from a file and getting the following error:
``` python
$ blaze-server server.yaml
Traceback (most recent call last):
File "/anaconda/envs/ep-blaze/bin/blaze-server", line 6, in <module>
sys.exit(_main())
File "/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py", line 130, in _main
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
File "/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py", line 130, in <genexpr>
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
AttributeError: 'dict' object has no attribute 'Exception'
```
I believe the problem is in this line:
https://github.com/ContinuumIO/blaze/blob/06991f6d368f23700019e36b337ea2800f37ab14/blaze/server/spider.py#L130
when no `ignored_exception` is passed in the args.
--- END ISSUE ---
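For context: `__builtins__` is a CPython implementation detail — it is the `builtins` module inside `__main__`, but a plain dict in imported modules such as `blaze.server.spider`, which is exactly why `getattr(__builtins__, 'Exception')` raises the `AttributeError` above. A minimal sketch of the portable lookup (the same approach the patch below ends up taking):

```python
try:
    import __builtin__ as builtins  # Python 2
except ImportError:
    import builtins  # Python 3


def exceptions_by_name(names):
    """Resolve exception class names without relying on __builtins__,
    which is only a module when the code runs as __main__."""
    return tuple(getattr(builtins, name) for name in names)


# e.g. exceptions_by_name(['Exception', 'ValueError'])
# -> (<class 'Exception'>, <class 'ValueError'>)
```

Importing the `builtins` module explicitly keeps the lookup working no matter which module the code is imported from.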
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/server/spider.py`
Content:
```
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import
4
5 import os
6 import sys
7 import argparse
8
9 import yaml
10
11 from odo import resource
12 from odo.utils import ignoring
13
14 from .server import Server, DEFAULT_PORT
15
16
17 __all__ = 'spider', 'from_yaml'
18
19
20 def _spider(resource_path, ignore, followlinks, hidden):
21 resources = {}
22 for filename in (os.path.join(resource_path, x)
23 for x in os.listdir(resource_path)):
24 basename = os.path.basename(filename)
25 if (basename.startswith(os.curdir) and not hidden or
26 os.path.islink(filename) and not followlinks):
27 continue
28 if os.path.isdir(filename):
29 new_resources = _spider(filename, ignore=ignore,
30 followlinks=followlinks, hidden=hidden)
31 if new_resources:
32 resources[basename] = new_resources
33 else:
34 with ignoring(*ignore):
35 resources[basename] = resource(filename)
36 return resources
37
38
39 def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
40 hidden=False):
41 """Traverse a directory and call ``odo.resource`` on its contentso
42
43 Parameters
44 ----------
45 path : str
46 Path to a directory of resources to load
47 ignore : tuple of Exception, optional
48 Ignore these exceptions when calling resource
49 followlinks : bool, optional
50 Follow symbolic links
51 hidden : bool, optional
52 Load hidden files
53
54 Returns
55 -------
56 dict
57 Possibly nested dictionary of containing basenames mapping to resources
58 """
59 return {
60 os.path.basename(path): _spider(path, ignore=ignore,
61 followlinks=followlinks,
62 hidden=hidden)
63 }
64
65
66 def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
67 hidden=False):
68 """Construct a dictionary of resources from a YAML specification.
69
70 Parameters
71 ----------
72 path : str
73 Path to a YAML specification of resources to load
74 ignore : tuple of Exception, optional
75 Ignore these exceptions when calling resource
76 followlinks : bool, optional
77 Follow symbolic links
78 hidden : bool, optional
79 Load hidden files
80
81 Returns
82 -------
83 dict
84 A dictionary mapping top level keys in a YAML file to resources.
85
86 See Also
87 --------
88 spider : Traverse a directory tree for resources
89 """
90 resources = {}
91 for name, info in yaml.load(path.read()).items():
92 if 'source' not in info:
93 raise ValueError('source key not found for data source named %r' %
94 name)
95 source = info['source']
96 if os.path.isdir(source):
97 resources[name] = spider(os.path.expanduser(source),
98 ignore=ignore,
99 followlinks=followlinks,
100 hidden=hidden)
101 else:
102 resources[name] = resource(source, dshape=info.get('dshape'))
103 return resources
104
105
106 def _parse_args():
107 p = argparse.ArgumentParser(
108 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
109 p.add_argument('path', type=argparse.FileType('r'), nargs='?',
110 default=sys.stdin,
111 help='A YAML file specifying the resources to load')
112 p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
113 help='Port number')
114 p.add_argument('-H', '--host', type=str, default='127.0.0.1',
115 help='Host name. Use 0.0.0.0 to listen on all public IPs')
116 p.add_argument('-l', '--follow-links', action='store_true',
117 help='Follow links when listing files')
118 p.add_argument('-e', '--ignored-exception', nargs='*',
119 default=['Exception'],
120 help='Exceptions to ignore when calling resource on a file')
121 p.add_argument('-d', '--hidden', action='store_true',
122 help='Call resource on hidden files')
123 p.add_argument('-D', '--debug', action='store_true',
124 help='Start the Flask server in debug mode')
125 return p.parse_args()
126
127
128 def _main():
129 args = _parse_args()
130 ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
131 resources = from_yaml(args.path,
132 ignore=ignore,
133 followlinks=args.follow_links,
134 hidden=args.hidden)
135 Server(resources).run(host=args.host, port=args.port, debug=args.debug)
136
137
138 if __name__ == '__main__':
139 _main()
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/blaze/server/spider.py b/blaze/server/spider.py
--- a/blaze/server/spider.py
+++ b/blaze/server/spider.py
@@ -13,6 +13,11 @@
from .server import Server, DEFAULT_PORT
+try:
+ import __builtin__ as builtins
+except ImportError:
+ import builtins
+
__all__ = 'spider', 'from_yaml'
@@ -115,7 +120,7 @@
help='Host name. Use 0.0.0.0 to listen on all public IPs')
p.add_argument('-l', '--follow-links', action='store_true',
help='Follow links when listing files')
- p.add_argument('-e', '--ignored-exception', nargs='*',
+ p.add_argument('-e', '--ignored-exception', nargs='+',
default=['Exception'],
help='Exceptions to ignore when calling resource on a file')
p.add_argument('-d', '--hidden', action='store_true',
@@ -127,7 +132,7 @@
def _main():
args = _parse_args()
- ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
+ ignore = tuple(getattr(builtins, e) for e in args.ignored_exception)
resources = from_yaml(args.path,
ignore=ignore,
followlinks=args.follow_links,
| {"golden_diff": "diff --git a/blaze/server/spider.py b/blaze/server/spider.py\n--- a/blaze/server/spider.py\n+++ b/blaze/server/spider.py\n@@ -13,6 +13,11 @@\n \n from .server import Server, DEFAULT_PORT\n \n+try:\n+ import __builtin__ as builtins\n+except ImportError:\n+ import builtins\n+\n \n __all__ = 'spider', 'from_yaml'\n \n@@ -115,7 +120,7 @@\n help='Host name. Use 0.0.0.0 to listen on all public IPs')\n p.add_argument('-l', '--follow-links', action='store_true',\n help='Follow links when listing files')\n- p.add_argument('-e', '--ignored-exception', nargs='*',\n+ p.add_argument('-e', '--ignored-exception', nargs='+',\n default=['Exception'],\n help='Exceptions to ignore when calling resource on a file')\n p.add_argument('-d', '--hidden', action='store_true',\n@@ -127,7 +132,7 @@\n \n def _main():\n args = _parse_args()\n- ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n+ ignore = tuple(getattr(builtins, e) for e in args.ignored_exception)\n resources = from_yaml(args.path,\n ignore=ignore,\n followlinks=args.follow_links,\n", "issue": "Blaze server yaml file error\nTesting the Blaze server from a file and getting the following error:\n\n``` python\n$ blaze-server server.yaml\nTraceback (most recent call last):\n File \"/anaconda/envs/ep-blaze/bin/blaze-server\", line 6, in <module>\n sys.exit(_main())\n File \"/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py\", line 130, in _main\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n File \"/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py\", line 130, in <genexpr>\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\nAttributeError: 'dict' object has no attribute 'Exception'\n```\n\nI believe problem is in this line:\nhttps://github.com/ContinuumIO/blaze/blob/06991f6d368f23700019e36b337ea2800f37ab14/blaze/server/spider.py#L130\nwhen no ignored_exception in the args is passed.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport argparse\n\nimport yaml\n\nfrom odo import resource\nfrom odo.utils import ignoring\n\nfrom .server import Server, DEFAULT_PORT\n\n\n__all__ = 'spider', 'from_yaml'\n\n\ndef _spider(resource_path, ignore, followlinks, hidden):\n resources = {}\n for filename in (os.path.join(resource_path, x)\n for x in os.listdir(resource_path)):\n basename = os.path.basename(filename)\n if (basename.startswith(os.curdir) and not hidden or\n os.path.islink(filename) and not followlinks):\n continue\n if os.path.isdir(filename):\n new_resources = _spider(filename, ignore=ignore,\n followlinks=followlinks, hidden=hidden)\n if new_resources:\n resources[basename] = new_resources\n else:\n with ignoring(*ignore):\n resources[basename] = resource(filename)\n return resources\n\n\ndef spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Traverse a directory and call ``odo.resource`` on its contentso\n\n Parameters\n ----------\n path : str\n Path to a directory of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n Possibly nested dictionary of containing basenames mapping to resources\n \"\"\"\n return {\n os.path.basename(path): _spider(path, ignore=ignore,\n followlinks=followlinks,\n 
hidden=hidden)\n }\n\n\ndef from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Construct a dictionary of resources from a YAML specification.\n\n Parameters\n ----------\n path : str\n Path to a YAML specification of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n A dictionary mapping top level keys in a YAML file to resources.\n\n See Also\n --------\n spider : Traverse a directory tree for resources\n \"\"\"\n resources = {}\n for name, info in yaml.load(path.read()).items():\n if 'source' not in info:\n raise ValueError('source key not found for data source named %r' %\n name)\n source = info['source']\n if os.path.isdir(source):\n resources[name] = spider(os.path.expanduser(source),\n ignore=ignore,\n followlinks=followlinks,\n hidden=hidden)\n else:\n resources[name] = resource(source, dshape=info.get('dshape'))\n return resources\n\n\ndef _parse_args():\n p = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('path', type=argparse.FileType('r'), nargs='?',\n default=sys.stdin,\n help='A YAML file specifying the resources to load')\n p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,\n help='Port number')\n p.add_argument('-H', '--host', type=str, default='127.0.0.1',\n help='Host name. Use 0.0.0.0 to listen on all public IPs')\n p.add_argument('-l', '--follow-links', action='store_true',\n help='Follow links when listing files')\n p.add_argument('-e', '--ignored-exception', nargs='*',\n default=['Exception'],\n help='Exceptions to ignore when calling resource on a file')\n p.add_argument('-d', '--hidden', action='store_true',\n help='Call resource on hidden files')\n p.add_argument('-D', '--debug', action='store_true',\n help='Start the Flask server in debug mode')\n return p.parse_args()\n\n\ndef _main():\n args = _parse_args()\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n resources = from_yaml(args.path,\n ignore=ignore,\n followlinks=args.follow_links,\n hidden=args.hidden)\n Server(resources).run(host=args.host, port=args.port, debug=args.debug)\n\n\nif __name__ == '__main__':\n _main()\n", "path": "blaze/server/spider.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport argparse\n\nimport yaml\n\nfrom odo import resource\nfrom odo.utils import ignoring\n\nfrom .server import Server, DEFAULT_PORT\n\ntry:\n import __builtin__ as builtins\nexcept ImportError:\n import builtins\n\n\n__all__ = 'spider', 'from_yaml'\n\n\ndef _spider(resource_path, ignore, followlinks, hidden):\n resources = {}\n for filename in (os.path.join(resource_path, x)\n for x in os.listdir(resource_path)):\n basename = os.path.basename(filename)\n if (basename.startswith(os.curdir) and not hidden or\n os.path.islink(filename) and not followlinks):\n continue\n if os.path.isdir(filename):\n new_resources = _spider(filename, ignore=ignore,\n followlinks=followlinks, hidden=hidden)\n if new_resources:\n resources[basename] = new_resources\n else:\n with ignoring(*ignore):\n resources[basename] = resource(filename)\n return resources\n\n\ndef spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Traverse a directory and call ``odo.resource`` on its contentso\n\n 
Parameters\n ----------\n path : str\n Path to a directory of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n Possibly nested dictionary of containing basenames mapping to resources\n \"\"\"\n return {\n os.path.basename(path): _spider(path, ignore=ignore,\n followlinks=followlinks,\n hidden=hidden)\n }\n\n\ndef from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Construct a dictionary of resources from a YAML specification.\n\n Parameters\n ----------\n path : str\n Path to a YAML specification of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n A dictionary mapping top level keys in a YAML file to resources.\n\n See Also\n --------\n spider : Traverse a directory tree for resources\n \"\"\"\n resources = {}\n for name, info in yaml.load(path.read()).items():\n if 'source' not in info:\n raise ValueError('source key not found for data source named %r' %\n name)\n source = info['source']\n if os.path.isdir(source):\n resources[name] = spider(os.path.expanduser(source),\n ignore=ignore,\n followlinks=followlinks,\n hidden=hidden)\n else:\n resources[name] = resource(source, dshape=info.get('dshape'))\n return resources\n\n\ndef _parse_args():\n p = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('path', type=argparse.FileType('r'), nargs='?',\n default=sys.stdin,\n help='A YAML file specifying the resources to load')\n p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,\n help='Port number')\n p.add_argument('-H', '--host', type=str, default='127.0.0.1',\n help='Host name. Use 0.0.0.0 to listen on all public IPs')\n p.add_argument('-l', '--follow-links', action='store_true',\n help='Follow links when listing files')\n p.add_argument('-e', '--ignored-exception', nargs='+',\n default=['Exception'],\n help='Exceptions to ignore when calling resource on a file')\n p.add_argument('-d', '--hidden', action='store_true',\n help='Call resource on hidden files')\n p.add_argument('-D', '--debug', action='store_true',\n help='Start the Flask server in debug mode')\n return p.parse_args()\n\n\ndef _main():\n args = _parse_args()\n ignore = tuple(getattr(builtins, e) for e in args.ignored_exception)\n resources = from_yaml(args.path,\n ignore=ignore,\n followlinks=args.follow_links,\n hidden=args.hidden)\n Server(resources).run(host=args.host, port=args.port, debug=args.debug)\n\n\nif __name__ == '__main__':\n _main()\n", "path": "blaze/server/spider.py"}]} | 1,802 | 312 |
gh_patches_debug_28855 | rasdani/github-patches | git_diff | ultrabug__py3status-2101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
external_script modifies numeric output
The external_script module converts numeric values to a numeric type. This removes the original formatting of the input and is undesired.
To reproduce, create an external script that simply echoes "0.123000"; the output in the bar will be "0.123".
--- END ISSUE ---
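The formatting loss is a direct consequence of routing the string through `float()`; a quick standalone illustration (plain Python, independent of py3status):

```python
raw = "0.123000"

# Mirrors the coercion in external_script(): try int first, then float.
try:
    value = int(raw)
except ValueError:
    try:
        value = float(raw)
    except ValueError:
        value = raw

print(value)  # prints 0.123 -- float() parses the number and str() renders
              # it in canonical form, so the trailing zeros are lost
```

Keeping the original text therefore means either skipping the conversion or making it opt-in; the patch below adds a `convert_numbers` option for exactly this reason.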
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/external_script.py`
Content:
```
1 """
2 Display output of a given script.
3
4 Display output of any executable script set by `script_path`. Only the first
5 two lines of output will be used. The first line is used as the displayed
6 text. If the output has two or more lines, the second line is set as the text
7 color (and should hence be a valid hex color code such as #FF0000 for red).
8 The script should not have any parameters, but it could work.
9
10 Configuration parameters:
11 button_show_notification: button to show notification with full output
12 (default None)
13 cache_timeout: how often we refresh this module in seconds
14 (default 15)
15 format: see placeholders below (default '{output}')
16 localize: should script output be localized (if available)
17 (default True)
18 script_path: script you want to show output of (compulsory)
19 (default None)
20 strip_output: shall we strip leading and trailing spaces from output
21 (default False)
22
23 Format placeholders:
24 {lines} number of lines in the output
25 {output} output of script given by "script_path"
26
27 Examples:
28 ```
29 external_script {
30 format = "my name is {output}"
31 script_path = "/usr/bin/whoami"
32 }
33 ```
34
35 @author frimdo [email protected]
36
37 SAMPLE OUTPUT
38 {'full_text': 'script output'}
39
40 example
41 {'full_text': 'It is now: Wed Feb 22 22:24:13'}
42 """
43
44 import re
45
46 STRING_ERROR = "missing script_path"
47
48
49 class Py3status:
50 """
51 """
52
53 # available configuration parameters
54 button_show_notification = None
55 cache_timeout = 15
56 format = "{output}"
57 localize = True
58 script_path = None
59 strip_output = False
60
61 def post_config_hook(self):
62 if not self.script_path:
63 raise Exception(STRING_ERROR)
64
65 def external_script(self):
66 output_lines = None
67 response = {}
68 response["cached_until"] = self.py3.time_in(self.cache_timeout)
69 try:
70 self.output = self.py3.command_output(
71 self.script_path, shell=True, localized=self.localize
72 )
73 output_lines = self.output.splitlines()
74 if len(output_lines) > 1:
75 output_color = output_lines[1]
76 if re.search(r"^#[0-9a-fA-F]{6}$", output_color):
77 response["color"] = output_color
78 except self.py3.CommandError as e:
79 # something went wrong show error to user
80 output = e.output or e.error
81 self.py3.error(output)
82
83 if output_lines:
84 output = output_lines[0]
85 if self.strip_output:
86 output = output.strip()
87 # If we get something that looks numeric then we convert it
88 # to a numeric type because this can be helpful. for example:
89 #
90 # external_script {
91 # format = "file is [\?if=output>10 big|small]"
92 # script_path = "cat /tmp/my_file | wc -l"
93 # }
94 try:
95 output = int(output)
96 except ValueError:
97 try:
98 output = float(output)
99 except ValueError:
100 pass
101 else:
102 output = ""
103
104 response["full_text"] = self.py3.safe_format(
105 self.format, {"output": output, "lines": len(output_lines)}
106 )
107 return response
108
109 def on_click(self, event):
110 button = event["button"]
111 if button == self.button_show_notification:
112 self.py3.notify_user(self.output)
113 self.py3.prevent_refresh()
114
115
116 if __name__ == "__main__":
117 """
118 Run module in test mode.
119 """
120 from py3status.module_test import module_test
121
122 module_test(Py3status)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py3status/modules/external_script.py b/py3status/modules/external_script.py
--- a/py3status/modules/external_script.py
+++ b/py3status/modules/external_script.py
@@ -12,6 +12,8 @@
(default None)
cache_timeout: how often we refresh this module in seconds
(default 15)
+ convert_numbers: convert decimal numbers to a numeric type
+ (default True)
format: see placeholders below (default '{output}')
localize: should script output be localized (if available)
(default True)
@@ -53,6 +55,7 @@
# available configuration parameters
button_show_notification = None
cache_timeout = 15
+ convert_numbers = True
format = "{output}"
localize = True
script_path = None
@@ -91,13 +94,14 @@
# format = "file is [\?if=output>10 big|small]"
# script_path = "cat /tmp/my_file | wc -l"
# }
- try:
- output = int(output)
- except ValueError:
+ if self.convert_numbers is True:
try:
- output = float(output)
+ output = int(output)
except ValueError:
- pass
+ try:
+ output = float(output)
+ except ValueError:
+ pass
else:
output = ""
| {"golden_diff": "diff --git a/py3status/modules/external_script.py b/py3status/modules/external_script.py\n--- a/py3status/modules/external_script.py\n+++ b/py3status/modules/external_script.py\n@@ -12,6 +12,8 @@\n (default None)\n cache_timeout: how often we refresh this module in seconds\n (default 15)\n+ convert_numbers: convert decimal numbers to a numeric type\n+ (default True)\n format: see placeholders below (default '{output}')\n localize: should script output be localized (if available)\n (default True)\n@@ -53,6 +55,7 @@\n # available configuration parameters\n button_show_notification = None\n cache_timeout = 15\n+ convert_numbers = True\n format = \"{output}\"\n localize = True\n script_path = None\n@@ -91,13 +94,14 @@\n # format = \"file is [\\?if=output>10 big|small]\"\n # script_path = \"cat /tmp/my_file | wc -l\"\n # }\n- try:\n- output = int(output)\n- except ValueError:\n+ if self.convert_numbers is True:\n try:\n- output = float(output)\n+ output = int(output)\n except ValueError:\n- pass\n+ try:\n+ output = float(output)\n+ except ValueError:\n+ pass\n else:\n output = \"\"\n", "issue": "external_script modifies numeric output\nThe external_script module converts numeric values to a numeric type. This removes the original formatting of the input and is undesired.\r\n\r\nTo reproduce create an external script and simply echo \"0.123000\", the output in the bar will be \"0.123\".\n", "before_files": [{"content": "\"\"\"\nDisplay output of a given script.\n\nDisplay output of any executable script set by `script_path`. Only the first\ntwo lines of output will be used. The first line is used as the displayed\ntext. If the output has two or more lines, the second line is set as the text\ncolor (and should hence be a valid hex color code such as #FF0000 for red).\nThe script should not have any parameters, but it could work.\n\nConfiguration parameters:\n button_show_notification: button to show notification with full output\n (default None)\n cache_timeout: how often we refresh this module in seconds\n (default 15)\n format: see placeholders below (default '{output}')\n localize: should script output be localized (if available)\n (default True)\n script_path: script you want to show output of (compulsory)\n (default None)\n strip_output: shall we strip leading and trailing spaces from output\n (default False)\n\nFormat placeholders:\n {lines} number of lines in the output\n {output} output of script given by \"script_path\"\n\nExamples:\n```\nexternal_script {\n format = \"my name is {output}\"\n script_path = \"/usr/bin/whoami\"\n}\n```\n\n@author frimdo [email protected]\n\nSAMPLE OUTPUT\n{'full_text': 'script output'}\n\nexample\n{'full_text': 'It is now: Wed Feb 22 22:24:13'}\n\"\"\"\n\nimport re\n\nSTRING_ERROR = \"missing script_path\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n button_show_notification = None\n cache_timeout = 15\n format = \"{output}\"\n localize = True\n script_path = None\n strip_output = False\n\n def post_config_hook(self):\n if not self.script_path:\n raise Exception(STRING_ERROR)\n\n def external_script(self):\n output_lines = None\n response = {}\n response[\"cached_until\"] = self.py3.time_in(self.cache_timeout)\n try:\n self.output = self.py3.command_output(\n self.script_path, shell=True, localized=self.localize\n )\n output_lines = self.output.splitlines()\n if len(output_lines) > 1:\n output_color = output_lines[1]\n if re.search(r\"^#[0-9a-fA-F]{6}$\", output_color):\n response[\"color\"] = 
output_color\n except self.py3.CommandError as e:\n # something went wrong show error to user\n output = e.output or e.error\n self.py3.error(output)\n\n if output_lines:\n output = output_lines[0]\n if self.strip_output:\n output = output.strip()\n # If we get something that looks numeric then we convert it\n # to a numeric type because this can be helpful. for example:\n #\n # external_script {\n # format = \"file is [\\?if=output>10 big|small]\"\n # script_path = \"cat /tmp/my_file | wc -l\"\n # }\n try:\n output = int(output)\n except ValueError:\n try:\n output = float(output)\n except ValueError:\n pass\n else:\n output = \"\"\n\n response[\"full_text\"] = self.py3.safe_format(\n self.format, {\"output\": output, \"lines\": len(output_lines)}\n )\n return response\n\n def on_click(self, event):\n button = event[\"button\"]\n if button == self.button_show_notification:\n self.py3.notify_user(self.output)\n self.py3.prevent_refresh()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/external_script.py"}], "after_files": [{"content": "\"\"\"\nDisplay output of a given script.\n\nDisplay output of any executable script set by `script_path`. Only the first\ntwo lines of output will be used. The first line is used as the displayed\ntext. If the output has two or more lines, the second line is set as the text\ncolor (and should hence be a valid hex color code such as #FF0000 for red).\nThe script should not have any parameters, but it could work.\n\nConfiguration parameters:\n button_show_notification: button to show notification with full output\n (default None)\n cache_timeout: how often we refresh this module in seconds\n (default 15)\n convert_numbers: convert decimal numbers to a numeric type\n (default True)\n format: see placeholders below (default '{output}')\n localize: should script output be localized (if available)\n (default True)\n script_path: script you want to show output of (compulsory)\n (default None)\n strip_output: shall we strip leading and trailing spaces from output\n (default False)\n\nFormat placeholders:\n {lines} number of lines in the output\n {output} output of script given by \"script_path\"\n\nExamples:\n```\nexternal_script {\n format = \"my name is {output}\"\n script_path = \"/usr/bin/whoami\"\n}\n```\n\n@author frimdo [email protected]\n\nSAMPLE OUTPUT\n{'full_text': 'script output'}\n\nexample\n{'full_text': 'It is now: Wed Feb 22 22:24:13'}\n\"\"\"\n\nimport re\n\nSTRING_ERROR = \"missing script_path\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n button_show_notification = None\n cache_timeout = 15\n convert_numbers = True\n format = \"{output}\"\n localize = True\n script_path = None\n strip_output = False\n\n def post_config_hook(self):\n if not self.script_path:\n raise Exception(STRING_ERROR)\n\n def external_script(self):\n output_lines = None\n response = {}\n response[\"cached_until\"] = self.py3.time_in(self.cache_timeout)\n try:\n self.output = self.py3.command_output(\n self.script_path, shell=True, localized=self.localize\n )\n output_lines = self.output.splitlines()\n if len(output_lines) > 1:\n output_color = output_lines[1]\n if re.search(r\"^#[0-9a-fA-F]{6}$\", output_color):\n response[\"color\"] = output_color\n except self.py3.CommandError as e:\n # something went wrong show error to user\n output = e.output or e.error\n self.py3.error(output)\n\n if 
output_lines:\n output = output_lines[0]\n if self.strip_output:\n output = output.strip()\n # If we get something that looks numeric then we convert it\n # to a numeric type because this can be helpful. for example:\n #\n # external_script {\n # format = \"file is [\\?if=output>10 big|small]\"\n # script_path = \"cat /tmp/my_file | wc -l\"\n # }\n if self.convert_numbers is True:\n try:\n output = int(output)\n except ValueError:\n try:\n output = float(output)\n except ValueError:\n pass\n else:\n output = \"\"\n\n response[\"full_text\"] = self.py3.safe_format(\n self.format, {\"output\": output, \"lines\": len(output_lines)}\n )\n return response\n\n def on_click(self, event):\n button = event[\"button\"]\n if button == self.button_show_notification:\n self.py3.notify_user(self.output)\n self.py3.prevent_refresh()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/external_script.py"}]} | 1,408 | 315 |
gh_patches_debug_6294 | rasdani/github-patches | git_diff | e-valuation__EvaP-1353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing a backup made by update_production.sh does not work flawlessly.
Last week we wanted to do a production update. The json dump file created during that update could not be imported without issues:
- The dump does not contain the cronjob user, but it does contain foreign key references to it, so it cannot be imported.
- The dump contains data included by Django by default (auth, permission, ...). These need to be excluded when importing.
There should be some kind of documentation on what needs to be executed to import this dump back into the database. We should also add some test (could probably just run on Travis) that ensures this always works (dump, flush database, migrate, load dump).
--- END ISSUE ---
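For reference, the round trip that needs to work maps onto standard Django management commands; a sketch using `call_command` (the app labels come from the file below, while the flush/migrate/load sequence is an assumption about the intended workflow):

```python
from django.core.management import call_command

# Dump with natural keys so rows reference e.g. auth groups by name instead
# of by primary key -- this is what makes the fixture loadable into a fresh,
# freshly migrated database that assigns different pks.
call_command(
    "dumpdata", "auth.group", "evaluation", "rewards", "grades",
    indent=2, output="dump.json",
    natural_foreign=True, natural_primary=True,
)

# Restore (sketch): wipe, migrate, then load the dump.
call_command("flush", interactive=False)
call_command("migrate")
call_command("loaddata", "dump.json")
```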
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/management/commands/dump_testdata.py`
Content:
```
1 import os
2
3 from django.conf import settings
4 from django.core.management.base import BaseCommand
5 from django.core.management import call_command
6
7
8 class Command(BaseCommand):
9 args = ''
10 help = 'Dumps all relevant contents of the database into test_data.json.'
11 requires_migrations_checks = True
12
13 def handle(self, *args, **options):
14 outfile_name = os.path.join(settings.BASE_DIR, "evaluation", "fixtures", "test_data.json")
15 call_command("dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2, output=outfile_name)
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/management/commands/dump_testdata.py b/evap/evaluation/management/commands/dump_testdata.py
--- a/evap/evaluation/management/commands/dump_testdata.py
+++ b/evap/evaluation/management/commands/dump_testdata.py
@@ -12,4 +12,6 @@
def handle(self, *args, **options):
outfile_name = os.path.join(settings.BASE_DIR, "evaluation", "fixtures", "test_data.json")
- call_command("dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2, output=outfile_name)
+ call_command(
+ "dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2,
+ output=outfile_name, natural_foreign=True, natural_primary=True)
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/dump_testdata.py b/evap/evaluation/management/commands/dump_testdata.py\n--- a/evap/evaluation/management/commands/dump_testdata.py\n+++ b/evap/evaluation/management/commands/dump_testdata.py\n@@ -12,4 +12,6 @@\n \n def handle(self, *args, **options):\n outfile_name = os.path.join(settings.BASE_DIR, \"evaluation\", \"fixtures\", \"test_data.json\")\n- call_command(\"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2, output=outfile_name)\n+ call_command(\n+ \"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2,\n+ output=outfile_name, natural_foreign=True, natural_primary=True)\n", "issue": "Importing a backup made by update_production.sh does not work flawlessly.\nLast week we wanted to do a production update. The json dump file created during that update could not be imported without issues:\r\n- The dump does not contain the cronjob user, but foreign key references to it. This can not be imported\r\n- The dump contains data included by django by default (auth, permission, ...). These need to be excluded when importing.\r\n\r\nThere should be some kind of documentation on what needs to be executed to import this dump back into the database. We should also add some test (could probably just run on travis) that ensures this always works (dump, flush database, migrate, load dump).\n", "before_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Dumps all relevant contents of the database into test_data.json.'\n requires_migrations_checks = True\n\n def handle(self, *args, **options):\n outfile_name = os.path.join(settings.BASE_DIR, \"evaluation\", \"fixtures\", \"test_data.json\")\n call_command(\"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2, output=outfile_name)\n", "path": "evap/evaluation/management/commands/dump_testdata.py"}], "after_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Dumps all relevant contents of the database into test_data.json.'\n requires_migrations_checks = True\n\n def handle(self, *args, **options):\n outfile_name = os.path.join(settings.BASE_DIR, \"evaluation\", \"fixtures\", \"test_data.json\")\n call_command(\n \"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2,\n output=outfile_name, natural_foreign=True, natural_primary=True)\n", "path": "evap/evaluation/management/commands/dump_testdata.py"}]} | 559 | 189 |
gh_patches_debug_48905 | rasdani/github-patches | git_diff | hylang__hy-1322 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The lexer hits the maximum recursion depth given a file with too many comment lines
$ yes ';' | head -n 500 >/tmp/foo.hy
$ hy /tmp/foo.hy
Traceback (most recent call last):
File "/home/hippo/Desktop/hyenv/bin/hy", line 11, in <module>
load_entry_point('hy', 'console_scripts', 'hy')()
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 344, in hy_main
sys.exit(cmdline_handler("hy", sys.argv))
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 332, in cmdline_handler
return run_file(options.args[0])
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 211, in run_file
pretty_error(import_file_to_module, "__main__", filename)
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 184, in pretty_error
return func(*args, **kw)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 95, in import_file_to_module
_ast = import_file_to_ast(fpath, module_name)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 53, in import_file_to_ast
return hy_compile(import_file_to_hst(fpath), module_name)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 41, in import_file_to_hst
return import_buffer_to_hst(f.read())
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 34, in import_buffer_to_hst
return tokenize(buf + "\n")
File "/home/hippo/Desktop/hyenv/hy/hy/lex/__init__.py", line 17, in tokenize
return parser.parse(lexer.lex(buf))
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/parser.py", line 32, in parse
lookahead = next(tokenizer)
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 56, in __next__
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
[Previous line repeated 976 more times]
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 38, in next
match = rule.matches(self.s, self.idx)
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexergenerator.py", line 33, in matches
return Match(*m.span(0)) if m is not None else None
RecursionError: maximum recursion depth exceeded
--- END ISSUE ---
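The frames repeating inside `rply/lexer.py` are the tell: before rply 0.7.5 the lexer skipped each ignored token (here, one per comment line) with a recursive call, so a few hundred consecutive comments exhaust the default recursion limit. A stripped-down sketch of the failure mode and the iterative shape that avoids it (illustrative only, not rply's actual code):

```python
import re

COMMENT = re.compile(r";[^\n]*\n")


def skip_comments_recursive(source, idx=0):
    # One stack frame per ignored token, as in rply < 0.7.5.
    m = COMMENT.match(source, idx)
    if m is None:
        return idx
    return skip_comments_recursive(source, m.end())


def skip_comments_iterative(source, idx=0):
    # The loop-based equivalent: constant stack depth.
    while True:
        m = COMMENT.match(source, idx)
        if m is None:
            return idx
        idx = m.end()


source = ";\n" * 5000
try:
    skip_comments_recursive(source)
except RecursionError:
    print("recursive skip exceeds the limit, matching the traceback above")
print(skip_comments_iterative(source))  # 10000: same input, no recursion
```

This is why the patch below simply raises the pinned rply version to 0.7.5.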
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2017 the authors.
3 # This file is part of Hy, which is free software licensed under the Expat
4 # license. See the LICENSE.
5
6 import sys, os
7
8 from setuptools import find_packages, setup
9 from setuptools.command.install import install
10
11 from get_version import __version__
12
13 os.chdir(os.path.split(os.path.abspath(__file__))[0])
14
15 PKG = "hy"
16
17 long_description = """Hy is a Python <--> Lisp layer. It helps
18 make things work nicer, and lets Python and the Hy lisp variant play
19 nice together. """
20
21 class Install(install):
22 def run(self):
23 # Import each Hy module to ensure it's compiled.
24 import os, importlib
25 for dirpath, _, filenames in sorted(os.walk("hy")):
26 for filename in sorted(filenames):
27 if filename.endswith(".hy"):
28 importlib.import_module(
29 dirpath.replace("/", ".").replace("\\", ".") +
30 "." + filename[:-len(".hy")])
31 install.run(self)
32
33 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
34 if os.name == 'nt':
35 install_requires.append('pyreadline>=2.1')
36
37 ver = sys.version_info[0]
38
39 setup(
40 name=PKG,
41 version=__version__,
42 install_requires=install_requires,
43 cmdclass=dict(install=Install),
44 entry_points={
45 'console_scripts': [
46 'hy = hy.cmdline:hy_main',
47 'hy%d = hy.cmdline:hy_main' % ver,
48 'hyc = hy.cmdline:hyc_main',
49 'hyc%d = hy.cmdline:hyc_main' % ver,
50 'hy2py = hy.cmdline:hy2py_main',
51 'hy2py%d = hy.cmdline:hy2py_main' % ver,
52 ]
53 },
54 packages=find_packages(exclude=['tests*']),
55 package_data={
56 'hy.contrib': ['*.hy', '__pycache__/*'],
57 'hy.core': ['*.hy', '__pycache__/*'],
58 'hy.extra': ['*.hy', '__pycache__/*'],
59 },
60 data_files=[
61 ('get_version', ['get_version.py'])
62 ],
63 author="Paul Tagliamonte",
64 author_email="[email protected]",
65 long_description=long_description,
66 description='Lisp and Python love each other.',
67 license="Expat",
68 url="http://hylang.org/",
69 platforms=['any'],
70 classifiers=[
71 "Development Status :: 4 - Beta",
72 "Intended Audience :: Developers",
73 "License :: DFSG approved",
74 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
75 "Operating System :: OS Independent",
76 "Programming Language :: Lisp",
77 "Programming Language :: Python",
78 "Programming Language :: Python :: 2",
79 "Programming Language :: Python :: 2.7",
80 "Programming Language :: Python :: 3",
81 "Programming Language :: Python :: 3.3",
82 "Programming Language :: Python :: 3.4",
83 "Programming Language :: Python :: 3.5",
84 "Programming Language :: Python :: 3.6",
85 "Topic :: Software Development :: Code Generators",
86 "Topic :: Software Development :: Compilers",
87 "Topic :: Software Development :: Libraries",
88 ]
89 )
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
"." + filename[:-len(".hy")])
install.run(self)
-install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
+install_requires = ['rply>=0.7.5', 'astor>=0.5', 'clint>=0.4']
if os.name == 'nt':
install_requires.append('pyreadline>=2.1')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \".\" + filename[:-len(\".hy\")])\n install.run(self)\n \n-install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\n+install_requires = ['rply>=0.7.5', 'astor>=0.5', 'clint>=0.4']\n if os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n", "issue": "The lexer hits the maximum recursion depth given a file with too many comment lines\n $ yes ';' | head -n 500 >/tmp/foo.hy\r\n $ hy /tmp/foo.hy\r\n Traceback (most recent call last):\r\n File \"/home/hippo/Desktop/hyenv/bin/hy\", line 11, in <module>\r\n load_entry_point('hy', 'console_scripts', 'hy')()\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 344, in hy_main\r\n sys.exit(cmdline_handler(\"hy\", sys.argv))\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 332, in cmdline_handler\r\n return run_file(options.args[0])\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 211, in run_file\r\n pretty_error(import_file_to_module, \"__main__\", filename)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 184, in pretty_error\r\n return func(*args, **kw)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 95, in import_file_to_module\r\n _ast = import_file_to_ast(fpath, module_name)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 53, in import_file_to_ast\r\n return hy_compile(import_file_to_hst(fpath), module_name)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 41, in import_file_to_hst\r\n return import_buffer_to_hst(f.read())\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 34, in import_buffer_to_hst\r\n return tokenize(buf + \"\\n\")\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/lex/__init__.py\", line 17, in tokenize\r\n return parser.parse(lexer.lex(buf))\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/parser.py\", line 32, in parse\r\n lookahead = next(tokenizer)\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 56, in __next__\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n [Previous line repeated 976 more times]\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 38, in next\r\n match = rule.matches(self.s, self.idx)\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexergenerator.py\", line 33, in matches\r\n return Match(*m.span(0)) if m is not None else None\r\n RecursionError: maximum recursion depth exceeded\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport sys, os\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nclass Install(install):\n def run(self):\n # Import each Hy module to ensure it's compiled.\n import os, importlib\n for dirpath, _, filenames in sorted(os.walk(\"hy\")):\n for filename in sorted(filenames):\n if filename.endswith(\".hy\"):\n importlib.import_module(\n dirpath.replace(\"/\", \".\").replace(\"\\\\\", \".\") +\n \".\" + filename[:-len(\".hy\")])\n install.run(self)\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n cmdclass=dict(install=Install),\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport sys, os\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nclass Install(install):\n def run(self):\n # Import each Hy module to ensure it's compiled.\n import os, importlib\n for dirpath, _, filenames in sorted(os.walk(\"hy\")):\n for filename in sorted(filenames):\n if filename.endswith(\".hy\"):\n importlib.import_module(\n dirpath.replace(\"/\", \".\").replace(\"\\\\\", \".\") +\n \".\" + filename[:-len(\".hy\")])\n install.run(self)\n\ninstall_requires = ['rply>=0.7.5', 'astor>=0.5', 'clint>=0.4']\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n cmdclass=dict(install=Install),\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]} | 1,922 | 127 |
gh_patches_debug_1061 | rasdani/github-patches | git_diff | kymatio__kymatio-352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH+TST find a way of testing GPU code
With not too much investment in 💲 💰 it should be possible to set up a `jenkins` testing suite on Amazon AWS: The idea is to have a micro machine that costs 1c/h run the Jenkins server. When tests should be run, this should somehow spawn a couple of GPU machines with different GPUs, ideally as spot instances, run the tests and then shut them down again.
I looked into this at the very beginning of `kymatio`, but I don't really know how to set this up yet. If anybody has experience with this, feel free to try! :)
--- END ISSUE ---
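Whatever the provisioning story ends up being, the test-suite side usually reduces to tagging GPU tests so a CI worker can select or skip them; a minimal pytest sketch (assuming the PyTorch backend — the marker and test names are made up):

```python
import pytest

try:
    import torch
    HAS_CUDA = torch.cuda.is_available()
except ImportError:
    HAS_CUDA = False

# Plain CI boxes skip these; the spawned GPU spot instances run them.
needs_gpu = pytest.mark.skipif(not HAS_CUDA, reason="requires a CUDA device")


@needs_gpu
def test_tensor_lands_on_gpu():
    x = torch.randn(4, 4, device="cuda")
    assert x.is_cuda
```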
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import csv
5 import importlib
6 import os
7 import shutil
8 import sys
9 from setuptools import setup, find_packages
10
11 # Constants
12 DISTNAME = 'kymatio'
13 DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
14 URL = 'https://www.kymat.io'
15 LICENSE = 'BSD-3-Clause'
16
17
18 # Parse description
19 with open('README.md') as f:
20 README = f.read().split('\n')
21 LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
22
23
24 # Parse version.py
25 kymatio_version_spec = importlib.util.spec_from_file_location(
26 'kymatio_version', 'kymatio/version.py')
27 kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)
28 kymatio_version_spec.loader.exec_module(kymatio_version_module)
29 VERSION = kymatio_version_module.version
30
31
32 # Parse requirements.txt
33 with open('requirements.txt', 'r') as f:
34 REQUIREMENTS = f.read().split('\n')
35
36
37 setup_info = dict(
38 # Metadata
39 name=DISTNAME,
40 version=VERSION,
41 author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '
42 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '
43 'Louis Thiry, Vincent Lostanlen, Joakim Andén, '
44 'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'),
45 author_email=('[email protected], [email protected], '
46 '[email protected], [email protected], '
47 '[email protected], [email protected], '
48 '[email protected], [email protected], [email protected], '
49 '[email protected], [email protected], [email protected]'),
50 url=URL,
51 download_url='https://github.com/kymatio/kymatio/releases',
52 project_urls={
53 'Documentation': 'https://www.kymat.io/codereference.html',
54 'Source': 'https://github.com/kymatio/kymatio/',
55 'Tracker': 'https://github.com/kymatio/kymatio/issues',
56 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'
57 },
58 classifiers=['Intended Audience :: Education',
59 'Intended Audience :: Science/Research',
60 'License :: OSI Approved :: BSD License',
61 'Natural Language :: English',
62 'Operating System :: MacOS',
63 'Operating System :: POSIX :: Linux',
64 'Programming Language :: Python :: 3.5',
65 'Programming Language :: Python :: 3.6',
66 'Programming Language :: Python :: 3.7',
67 'Programming Language :: Python :: 3.8',
68 'Topic :: Multimedia :: Graphics :: 3D Modeling',
69 'Topic :: Multimedia :: Sound/Audio :: Analysis',
70 'Topic :: Scientific/Engineering :: Artificial Intelligence',
71 'Topic :: Scientific/Engineering :: Chemistry',
72 'Topic :: Scientific/Engineering :: Image Recognition',
73 'Topic :: Scientific/Engineering :: Information Analysis',
74 'Topic :: Scientific/Engineering :: Mathematics',
75 'Topic :: Scientific/Engineering :: Physics',
76 'Topic :: Software Development :: Libraries :: Python Modules',
77 ],
78 description=DESCRIPTION,
79 long_description=LONG_DESCRIPTION,
80 long_description_content_type='text/markdown',
81 python_requires='>=3.5',
82 license=LICENSE,
83 packages=find_packages(exclude=('test',)),
84 install_requires=REQUIREMENTS,
85 zip_safe=True,
86 )
87
88 setup(**setup_info)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
# Parse description
-with open('README.md') as f:
+with open('README.md', encoding='utf8') as f:
README = f.read().split('\n')
LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n \n \n # Parse description\n-with open('README.md') as f:\n+with open('README.md', encoding='utf8') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n", "issue": "ENH+TST find a way of testing GPU code\nWith not too much investment in \ud83d\udcb2 \ud83d\udcb0 it should be possible to set up a `jenkins` testing suite on amazon aws: The idea is to have a micro machine that costs 1c/h run the jenkins server. When tests should be run, this should somehow spawn a couple of GPU machines with different GPUs, ideally as spot instances, run the tests and then shut them down again.\r\nI looked into this at the very beginning of `kymatio`, but I don't really know how to set this up yet. If anybody has experience with this, feel free to try! :)\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://www.kymat.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n project_urls={\n 'Documentation': 'https://www.kymat.io/codereference.html',\n 'Source': 'https://github.com/kymatio/kymatio/',\n 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: 
Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n python_requires='>=3.5',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://www.kymat.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md', encoding='utf8') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n project_urls={\n 'Documentation': 'https://www.kymat.io/codereference.html',\n 'Source': 'https://github.com/kymatio/kymatio/',\n 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n python_requires='>=3.5',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n 
zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}]} | 1,397 | 94 |
gh_patches_debug_6301 | rasdani/github-patches | git_diff | azavea__raster-vision-1235 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Predictor does not reset the scene's aoi_geometries and the raster source's extent_crop
Currently, the `Predictor` re-uses a `SceneConfig` from the pipeline config in the bundle (instead of creating a new one) and resets its `label_source` and `aoi_uris`.
https://github.com/azavea/raster-vision/blob/master/rastervision_core/rastervision/core/predictor.py#L70-L71
However, it should also do this for `raster_source.extent_crop` (#1030) and `aoi_geometries` (#1033). In general, it should be done for every field that cannot be safely assumed to be the same for the input scene.
Instead of having to add to this every time something new is added to the `SceneConfig` or any of its member classes, it might be better to create a new scene in the predictor with options from the command line.
--- END ISSUE ---
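For orientation before the code listing: the reset the issue asks for amounts to clearing every per-scene field on the reused `SceneConfig` that cannot be assumed valid for new imagery. A minimal sketch of that idea (the helper name `_reset_scene_state` is an illustration, not code from the repository):
```python
def _reset_scene_state(scene):
    # Clear everything tied to the original training scene so it cannot
    # leak into predictions on new imagery.
    scene.label_source = None
    scene.aoi_uris = None
    scene.aoi_geometries = None              # see #1033
    scene.raster_source.extent_crop = None   # see #1030
```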
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision_core/rastervision/core/predictor.py`
Content:
```
1 from os.path import join
2 import zipfile
3 import logging
4
5 from rastervision.pipeline import rv_config
6 from rastervision.pipeline.config import (build_config, upgrade_config)
7 from rastervision.pipeline.file_system.utils import (download_if_needed,
8 make_dir, file_to_json)
9 from rastervision.core.data.raster_source import ChannelOrderError
10 from rastervision.core.analyzer import StatsAnalyzerConfig
11
12 log = logging.getLogger(__name__)
13
14
15 class Predictor():
16 """Class for making predictions based off of a model bundle."""
17
18 def __init__(self,
19 model_bundle_uri,
20 tmp_dir,
21 update_stats=False,
22 channel_order=None):
23 """Creates a new Predictor.
24
25 Args:
26 model_bundle_uri: URI of the model bundle to use. Can be any
27 type of URI that Raster Vision can read.
28 tmp_dir: Temporary directory in which to store files that are used
29 by the Predictor. This directory is not cleaned up by this
30 class.
31 channel_order: Option for a new channel order to use for the
32 imagery being predicted against. If not present, the
33 channel_order from the original configuration in the predict
34 package will be used.
35 """
36 self.tmp_dir = tmp_dir
37 self.update_stats = update_stats
38 self.model_loaded = False
39
40 bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
41 bundle_dir = join(tmp_dir, 'bundle')
42 make_dir(bundle_dir)
43 with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:
44 bundle_zip.extractall(path=bundle_dir)
45
46 config_path = join(bundle_dir, 'pipeline-config.json')
47 config_dict = file_to_json(config_path)
48 rv_config.set_everett_config(
49 config_overrides=config_dict.get('rv_config'))
50 config_dict = upgrade_config(config_dict)
51 self.config = build_config(config_dict)
52 self.scene = self.config.dataset.validation_scenes[0]
53
54 if not hasattr(self.scene.raster_source, 'uris'):
55 raise Exception(
56 'raster_source in model bundle must have uris as field')
57
58 if not hasattr(self.scene.label_store, 'uri'):
59 raise Exception(
60 'label_store in model bundle must have uri as field')
61
62 for t in self.scene.raster_source.transformers:
63 t.update_root(bundle_dir)
64
65 if self.update_stats:
66 stats_analyzer = StatsAnalyzerConfig(
67 output_uri=join(bundle_dir, 'stats.json'))
68 self.config.analyzers = [stats_analyzer]
69
70 self.scene.label_source = None
71 self.scene.aoi_uris = None
72 self.config.dataset.train_scenes = [self.scene]
73 self.config.dataset.validation_scenes = [self.scene]
74 self.config.dataset.test_scenes = []
75 self.config.train_uri = bundle_dir
76
77 if channel_order is not None:
78 self.scene.raster_source.channel_order = channel_order
79
80 self.pipeline = None
81
82 def predict(self, image_uris, label_uri, vector_label_uri=None):
83 """Generate predictions for the given image.
84
85 Args:
86 image_uris: URIs of the images to make predictions against.
87 This can be any type of URI readable by Raster Vision
88 FileSystems.
89 label_uri: URI to save labels off into
90 vector_label_uri: URI to save vectorized labels for semantic segmentation
91 model bundles that support it
92 """
93 if self.pipeline is None:
94 self.scene.raster_source.uris = image_uris
95 self.pipeline = self.config.build(self.tmp_dir)
96 if not hasattr(self.pipeline, 'predict'):
97 raise Exception(
98 'pipeline in model bundle must have predict method')
99
100 try:
101 self.scene.raster_source.uris = image_uris
102 self.scene.label_store.uri = label_uri
103 if (hasattr(self.scene.label_store, 'vector_output')
104 and self.scene.label_store.vector_output):
105 if vector_label_uri:
106 for vo in self.scene.label_store.vector_output:
107 vo.uri = join(
108 vector_label_uri, '{}-{}.json'.format(
109 vo.class_id, vo.get_mode()))
110 else:
111 self.scene.label_store.vector_output = []
112 elif vector_label_uri:
113 log.warn(
114 'vector_label_uri was supplied but this model bundle does not '
115 'generate vector labels.')
116
117 if self.update_stats:
118 self.pipeline.analyze()
119 self.pipeline.predict()
120 except ChannelOrderError:
121 raise ValueError(
122 'The predict package is using a channel_order '
123 'with channels unavailable in the imagery.\nTo set a new '
124 'channel_order that only uses channels available in the '
125 'imagery, use the --channel-order option.')
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py
--- a/rastervision_core/rastervision/core/predictor.py
+++ b/rastervision_core/rastervision/core/predictor.py
@@ -69,6 +69,9 @@
self.scene.label_source = None
self.scene.aoi_uris = None
+ self.scene.aoi_geometries = None
+ self.scene.raster_source.extent_crop = None
+
self.config.dataset.train_scenes = [self.scene]
self.config.dataset.validation_scenes = [self.scene]
self.config.dataset.test_scenes = []
| {"golden_diff": "diff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py\n--- a/rastervision_core/rastervision/core/predictor.py\n+++ b/rastervision_core/rastervision/core/predictor.py\n@@ -69,6 +69,9 @@\n \n self.scene.label_source = None\n self.scene.aoi_uris = None\n+ self.scene.aoi_geometries = None\n+ self.scene.raster_source.extent_crop = None\n+\n self.config.dataset.train_scenes = [self.scene]\n self.config.dataset.validation_scenes = [self.scene]\n self.config.dataset.test_scenes = []\n", "issue": "Predictor does not reset the scene's aoi_geometries and the raster source's extent_crop\nCurrently, the `Predictor` re-uses a `SceneConfig` from the pipeline config in the bundle (instead of creating a new one) and resets its `label_source` and `aoi_uris`.\r\nhttps://github.com/azavea/raster-vision/blob/master/rastervision_core/rastervision/core/predictor.py#L70-L71\r\n\r\nHowever, it should also do this for `raster_source.extent_crop` (#1030) and `aoi_geometries` (#1033). In general, it should be done for every field that cannot be safely assumed to be the same for the input scene.\r\n\r\nInstead of having to add to this every time something new is added to the `SceneConfig` or any of its member classes, it might be better to create a new scene in the predictor with options from the command line.\n", "before_files": [{"content": "from os.path import join\nimport zipfile\nimport logging\n\nfrom rastervision.pipeline import rv_config\nfrom rastervision.pipeline.config import (build_config, upgrade_config)\nfrom rastervision.pipeline.file_system.utils import (download_if_needed,\n make_dir, file_to_json)\nfrom rastervision.core.data.raster_source import ChannelOrderError\nfrom rastervision.core.analyzer import StatsAnalyzerConfig\n\nlog = logging.getLogger(__name__)\n\n\nclass Predictor():\n \"\"\"Class for making predictions based off of a model bundle.\"\"\"\n\n def __init__(self,\n model_bundle_uri,\n tmp_dir,\n update_stats=False,\n channel_order=None):\n \"\"\"Creates a new Predictor.\n\n Args:\n model_bundle_uri: URI of the model bundle to use. Can be any\n type of URI that Raster Vision can read.\n tmp_dir: Temporary directory in which to store files that are used\n by the Predictor. This directory is not cleaned up by this\n class.\n channel_order: Option for a new channel order to use for the\n imagery being predicted against. 
If not present, the\n channel_order from the original configuration in the predict\n package will be used.\n \"\"\"\n self.tmp_dir = tmp_dir\n self.update_stats = update_stats\n self.model_loaded = False\n\n bundle_path = download_if_needed(model_bundle_uri, tmp_dir)\n bundle_dir = join(tmp_dir, 'bundle')\n make_dir(bundle_dir)\n with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:\n bundle_zip.extractall(path=bundle_dir)\n\n config_path = join(bundle_dir, 'pipeline-config.json')\n config_dict = file_to_json(config_path)\n rv_config.set_everett_config(\n config_overrides=config_dict.get('rv_config'))\n config_dict = upgrade_config(config_dict)\n self.config = build_config(config_dict)\n self.scene = self.config.dataset.validation_scenes[0]\n\n if not hasattr(self.scene.raster_source, 'uris'):\n raise Exception(\n 'raster_source in model bundle must have uris as field')\n\n if not hasattr(self.scene.label_store, 'uri'):\n raise Exception(\n 'label_store in model bundle must have uri as field')\n\n for t in self.scene.raster_source.transformers:\n t.update_root(bundle_dir)\n\n if self.update_stats:\n stats_analyzer = StatsAnalyzerConfig(\n output_uri=join(bundle_dir, 'stats.json'))\n self.config.analyzers = [stats_analyzer]\n\n self.scene.label_source = None\n self.scene.aoi_uris = None\n self.config.dataset.train_scenes = [self.scene]\n self.config.dataset.validation_scenes = [self.scene]\n self.config.dataset.test_scenes = []\n self.config.train_uri = bundle_dir\n\n if channel_order is not None:\n self.scene.raster_source.channel_order = channel_order\n\n self.pipeline = None\n\n def predict(self, image_uris, label_uri, vector_label_uri=None):\n \"\"\"Generate predictions for the given image.\n\n Args:\n image_uris: URIs of the images to make predictions against.\n This can be any type of URI readable by Raster Vision\n FileSystems.\n label_uri: URI to save labels off into\n vector_label_uri: URI to save vectorized labels for semantic segmentation\n model bundles that support it\n \"\"\"\n if self.pipeline is None:\n self.scene.raster_source.uris = image_uris\n self.pipeline = self.config.build(self.tmp_dir)\n if not hasattr(self.pipeline, 'predict'):\n raise Exception(\n 'pipeline in model bundle must have predict method')\n\n try:\n self.scene.raster_source.uris = image_uris\n self.scene.label_store.uri = label_uri\n if (hasattr(self.scene.label_store, 'vector_output')\n and self.scene.label_store.vector_output):\n if vector_label_uri:\n for vo in self.scene.label_store.vector_output:\n vo.uri = join(\n vector_label_uri, '{}-{}.json'.format(\n vo.class_id, vo.get_mode()))\n else:\n self.scene.label_store.vector_output = []\n elif vector_label_uri:\n log.warn(\n 'vector_label_uri was supplied but this model bundle does not '\n 'generate vector labels.')\n\n if self.update_stats:\n self.pipeline.analyze()\n self.pipeline.predict()\n except ChannelOrderError:\n raise ValueError(\n 'The predict package is using a channel_order '\n 'with channels unavailable in the imagery.\\nTo set a new '\n 'channel_order that only uses channels available in the '\n 'imagery, use the --channel-order option.')\n", "path": "rastervision_core/rastervision/core/predictor.py"}], "after_files": [{"content": "from os.path import join\nimport zipfile\nimport logging\n\nfrom rastervision.pipeline import rv_config\nfrom rastervision.pipeline.config import (build_config, upgrade_config)\nfrom rastervision.pipeline.file_system.utils import (download_if_needed,\n make_dir, file_to_json)\nfrom 
rastervision.core.data.raster_source import ChannelOrderError\nfrom rastervision.core.analyzer import StatsAnalyzerConfig\n\nlog = logging.getLogger(__name__)\n\n\nclass Predictor():\n \"\"\"Class for making predictions based off of a model bundle.\"\"\"\n\n def __init__(self,\n model_bundle_uri,\n tmp_dir,\n update_stats=False,\n channel_order=None):\n \"\"\"Creates a new Predictor.\n\n Args:\n model_bundle_uri: URI of the model bundle to use. Can be any\n type of URI that Raster Vision can read.\n tmp_dir: Temporary directory in which to store files that are used\n by the Predictor. This directory is not cleaned up by this\n class.\n channel_order: Option for a new channel order to use for the\n imagery being predicted against. If not present, the\n channel_order from the original configuration in the predict\n package will be used.\n \"\"\"\n self.tmp_dir = tmp_dir\n self.update_stats = update_stats\n self.model_loaded = False\n\n bundle_path = download_if_needed(model_bundle_uri, tmp_dir)\n bundle_dir = join(tmp_dir, 'bundle')\n make_dir(bundle_dir)\n with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:\n bundle_zip.extractall(path=bundle_dir)\n\n config_path = join(bundle_dir, 'pipeline-config.json')\n config_dict = file_to_json(config_path)\n rv_config.set_everett_config(\n config_overrides=config_dict.get('rv_config'))\n config_dict = upgrade_config(config_dict)\n self.config = build_config(config_dict)\n self.scene = self.config.dataset.validation_scenes[0]\n\n if not hasattr(self.scene.raster_source, 'uris'):\n raise Exception(\n 'raster_source in model bundle must have uris as field')\n\n if not hasattr(self.scene.label_store, 'uri'):\n raise Exception(\n 'label_store in model bundle must have uri as field')\n\n for t in self.scene.raster_source.transformers:\n t.update_root(bundle_dir)\n\n if self.update_stats:\n stats_analyzer = StatsAnalyzerConfig(\n output_uri=join(bundle_dir, 'stats.json'))\n self.config.analyzers = [stats_analyzer]\n\n self.scene.label_source = None\n self.scene.aoi_uris = None\n self.scene.aoi_geometries = None\n self.scene.raster_source.extent_crop = None\n\n self.config.dataset.train_scenes = [self.scene]\n self.config.dataset.validation_scenes = [self.scene]\n self.config.dataset.test_scenes = []\n self.config.train_uri = bundle_dir\n\n if channel_order is not None:\n self.scene.raster_source.channel_order = channel_order\n\n self.pipeline = None\n\n def predict(self, image_uris, label_uri, vector_label_uri=None):\n \"\"\"Generate predictions for the given image.\n\n Args:\n image_uris: URIs of the images to make predictions against.\n This can be any type of URI readable by Raster Vision\n FileSystems.\n label_uri: URI to save labels off into\n vector_label_uri: URI to save vectorized labels for semantic segmentation\n model bundles that support it\n \"\"\"\n if self.pipeline is None:\n self.scene.raster_source.uris = image_uris\n self.pipeline = self.config.build(self.tmp_dir)\n if not hasattr(self.pipeline, 'predict'):\n raise Exception(\n 'pipeline in model bundle must have predict method')\n\n try:\n self.scene.raster_source.uris = image_uris\n self.scene.label_store.uri = label_uri\n if (hasattr(self.scene.label_store, 'vector_output')\n and self.scene.label_store.vector_output):\n if vector_label_uri:\n for vo in self.scene.label_store.vector_output:\n vo.uri = join(\n vector_label_uri, '{}-{}.json'.format(\n vo.class_id, vo.get_mode()))\n else:\n self.scene.label_store.vector_output = []\n elif vector_label_uri:\n log.warn(\n 'vector_label_uri 
was supplied but this model bundle does not '\n 'generate vector labels.')\n\n if self.update_stats:\n self.pipeline.analyze()\n self.pipeline.predict()\n except ChannelOrderError:\n raise ValueError(\n 'The predict package is using a channel_order '\n 'with channels unavailable in the imagery.\\nTo set a new '\n 'channel_order that only uses channels available in the '\n 'imagery, use the --channel-order option.')\n", "path": "rastervision_core/rastervision/core/predictor.py"}]} | 1,741 | 159 |
gh_patches_debug_17813 | rasdani/github-patches | git_diff | translate__pootle-4679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snippet caching is not cleared between tests
Currently, if you run a test that saves data in the exports cache, the data is still there in the next test.
--- END ISSUE ---
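One way to guarantee isolation, sketched here under the assumption that the test suite uses a single redis-backed cache, is an autouse fixture that flushes the cache around every test:
```python
import pytest


@pytest.fixture(autouse=True)
def clear_cache():
    """Flush the redis cache so no state leaks between tests."""
    from django_redis import get_redis_connection

    get_redis_connection('default').flushdb()
```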
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytest_pootle/fixtures/site.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import tempfile
10
11 import pytest
12
13 from pytest_pootle.env import PootleTestEnv
14
15
16 @pytest.fixture(autouse=True, scope='session')
17 def setup_db_if_needed(request):
18 """Sets up the site DB only if tests requested to use the DB (autouse)."""
19 is_db_marker_set = [
20 item for item in request.node.items
21 if item.get_marker('django_db')
22 ]
23 if is_db_marker_set:
24 return request.getfuncargvalue('post_db_setup')
25
26 return None
27
28
29 @pytest.fixture(scope='session')
30 def post_db_setup(translations_directory, _django_db_setup,
31 _django_cursor_wrapper, request):
32 """Sets up the site DB for the test session."""
33 with _django_cursor_wrapper:
34 PootleTestEnv(request).setup()
35
36
37 @pytest.fixture
38 def no_projects():
39 from pootle_project.models import Project
40
41 Project.objects.all().delete()
42
43
44 @pytest.fixture
45 def no_permissions():
46 from django.contrib.auth.models import Permission
47
48 Permission.objects.all().delete()
49
50
51 @pytest.fixture
52 def no_permission_sets():
53 from pootle_app.models import PermissionSet
54
55 PermissionSet.objects.all().delete()
56
57
58 @pytest.fixture
59 def no_submissions():
60 from pootle_statistics.models import Submission
61
62 Submission.objects.all().delete()
63
64
65 @pytest.fixture
66 def no_users():
67 from django.contrib.auth import get_user_model
68
69 User = get_user_model()
70 User.objects.all().delete()
71
72
73 @pytest.fixture
74 def no_extra_users():
75 from django.contrib.auth import get_user_model
76
77 User = get_user_model()
78 User.objects.exclude(
79 username__in=["system", "default", "nobody"]).delete()
80
81
82 @pytest.fixture(autouse=True, scope="session")
83 def translations_directory(request):
84 """used by PootleEnv"""
85 from django.conf import settings
86 settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()
87
```
Path: `pytest_pootle/fixtures/revision.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import pytest
11
12
13 @pytest.fixture(autouse=True)
14 def revision():
15 """Sets up the revision counter for each test call."""
16 from pootle.core.models import Revision
17
18 Revision.initialize()
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytest_pootle/fixtures/revision.py b/pytest_pootle/fixtures/revision.py
--- a/pytest_pootle/fixtures/revision.py
+++ b/pytest_pootle/fixtures/revision.py
@@ -11,8 +11,12 @@
@pytest.fixture(autouse=True)
-def revision():
- """Sets up the revision counter for each test call."""
+def revision(request, clear_cache):
+ """Sets up the cached revision counter for each test call."""
from pootle.core.models import Revision
+ from pootle_store.models import Unit
- Revision.initialize()
+ if request.node.get_marker("django_db"):
+ Revision.set(Unit.max_revision())
+ else:
+ Revision.initialize()
diff --git a/pytest_pootle/fixtures/site.py b/pytest_pootle/fixtures/site.py
--- a/pytest_pootle/fixtures/site.py
+++ b/pytest_pootle/fixtures/site.py
@@ -84,3 +84,13 @@
"""used by PootleEnv"""
from django.conf import settings
settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()
+
+
[email protected](autouse=True)
+def clear_cache(request):
+ """Currently tests only use one cache so this clears all"""
+
+ from django_redis import get_redis_connection
+
+ r_con = get_redis_connection('default')
+ r_con.flushdb()
| {"golden_diff": "diff --git a/pytest_pootle/fixtures/revision.py b/pytest_pootle/fixtures/revision.py\n--- a/pytest_pootle/fixtures/revision.py\n+++ b/pytest_pootle/fixtures/revision.py\n@@ -11,8 +11,12 @@\n \n \n @pytest.fixture(autouse=True)\n-def revision():\n- \"\"\"Sets up the revision counter for each test call.\"\"\"\n+def revision(request, clear_cache):\n+ \"\"\"Sets up the cached revision counter for each test call.\"\"\"\n from pootle.core.models import Revision\n+ from pootle_store.models import Unit\n \n- Revision.initialize()\n+ if request.node.get_marker(\"django_db\"):\n+ Revision.set(Unit.max_revision())\n+ else:\n+ Revision.initialize()\ndiff --git a/pytest_pootle/fixtures/site.py b/pytest_pootle/fixtures/site.py\n--- a/pytest_pootle/fixtures/site.py\n+++ b/pytest_pootle/fixtures/site.py\n@@ -84,3 +84,13 @@\n \"\"\"used by PootleEnv\"\"\"\n from django.conf import settings\n settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()\n+\n+\[email protected](autouse=True)\n+def clear_cache(request):\n+ \"\"\"Currently tests only use one cache so this clears all\"\"\"\n+\n+ from django_redis import get_redis_connection\n+\n+ r_con = get_redis_connection('default')\n+ r_con.flushdb()\n", "issue": "Snippet caching is not cleared between tests\nCurrently if you run a test that saves data in the exports cache, the data is still there in the next test\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport tempfile\n\nimport pytest\n\nfrom pytest_pootle.env import PootleTestEnv\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n is_db_marker_set = [\n item for item in request.node.items\n if item.get_marker('django_db')\n ]\n if is_db_marker_set:\n return request.getfuncargvalue('post_db_setup')\n\n return None\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, _django_db_setup,\n _django_cursor_wrapper, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n with _django_cursor_wrapper:\n PootleTestEnv(request).setup()\n\n\[email protected]\ndef no_projects():\n from pootle_project.models import Project\n\n Project.objects.all().delete()\n\n\[email protected]\ndef no_permissions():\n from django.contrib.auth.models import Permission\n\n Permission.objects.all().delete()\n\n\[email protected]\ndef no_permission_sets():\n from pootle_app.models import PermissionSet\n\n PermissionSet.objects.all().delete()\n\n\[email protected]\ndef no_submissions():\n from pootle_statistics.models import Submission\n\n Submission.objects.all().delete()\n\n\[email protected]\ndef no_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.all().delete()\n\n\[email protected]\ndef no_extra_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.exclude(\n username__in=[\"system\", \"default\", \"nobody\"]).delete()\n\n\[email protected](autouse=True, scope=\"session\")\ndef translations_directory(request):\n \"\"\"used by PootleEnv\"\"\"\n from django.conf import settings\n settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()\n", "path": "pytest_pootle/fixtures/site.py"}, 
{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport pytest\n\n\[email protected](autouse=True)\ndef revision():\n \"\"\"Sets up the revision counter for each test call.\"\"\"\n from pootle.core.models import Revision\n\n Revision.initialize()\n", "path": "pytest_pootle/fixtures/revision.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport tempfile\n\nimport pytest\n\nfrom pytest_pootle.env import PootleTestEnv\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n is_db_marker_set = [\n item for item in request.node.items\n if item.get_marker('django_db')\n ]\n if is_db_marker_set:\n return request.getfuncargvalue('post_db_setup')\n\n return None\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, _django_db_setup,\n _django_cursor_wrapper, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n with _django_cursor_wrapper:\n PootleTestEnv(request).setup()\n\n\[email protected]\ndef no_projects():\n from pootle_project.models import Project\n\n Project.objects.all().delete()\n\n\[email protected]\ndef no_permissions():\n from django.contrib.auth.models import Permission\n\n Permission.objects.all().delete()\n\n\[email protected]\ndef no_permission_sets():\n from pootle_app.models import PermissionSet\n\n PermissionSet.objects.all().delete()\n\n\[email protected]\ndef no_submissions():\n from pootle_statistics.models import Submission\n\n Submission.objects.all().delete()\n\n\[email protected]\ndef no_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.all().delete()\n\n\[email protected]\ndef no_extra_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.exclude(\n username__in=[\"system\", \"default\", \"nobody\"]).delete()\n\n\[email protected](autouse=True, scope=\"session\")\ndef translations_directory(request):\n \"\"\"used by PootleEnv\"\"\"\n from django.conf import settings\n settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()\n\n\[email protected](autouse=True)\ndef clear_cache(request):\n \"\"\"Currently tests only use one cache so this clears all\"\"\"\n\n from django_redis import get_redis_connection\n\n r_con = get_redis_connection('default')\n r_con.flushdb()\n", "path": "pytest_pootle/fixtures/site.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport pytest\n\n\[email protected](autouse=True)\ndef revision(request, clear_cache):\n \"\"\"Sets up the cached revision counter for each test call.\"\"\"\n from pootle.core.models import Revision\n from pootle_store.models import Unit\n\n if request.node.get_marker(\"django_db\"):\n Revision.set(Unit.max_revision())\n else:\n Revision.initialize()\n", "path": "pytest_pootle/fixtures/revision.py"}]} | 1,117 | 310 |
gh_patches_debug_18334 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify noxfile to build and test the package
Versions `2.0.0` and `2.0.1` were yanked from PyPI last week due to an issue where discovery documents were not included in the published package, causing `discovery.build()` to fail (#1214). A basic check could be added to verify the package works correctly using the steps in #1214. Ideally it should be done on every PR and push to master so the issue can be caught before the package is published. 
Use these steps from #1214 to re-produce the issue with version `2.0.0` and `2.0.1`:
1. Start with a clean clone of `google-api-python-client`
2. Checkout version `2.0.0` or `2.0.1`, using `git checkout 2.0.0`
3. Run `python setup.py sdist`
4. Run `pip install dist/google-api-python-client-<version>.tar.gz`
5. Run
```
$ python3
Python 3.8.7 (default, Jan 27 2021, 18:44:05)
[GCC 10.2.1 20201224] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from googleapiclient import discovery
>>> client = discovery.build("cloudprofiler", "v2")
...
```
Before closing this issue, we should ensure that we have checks in place so that a PR will fail if `package_data` [here](https://github.com/googleapis/google-api-python-client/blob/master/setup.py#L78) is empty.
--- END ISSUE ---
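A rough sketch of such a check as a nox session: build a wheel, install that artifact rather than the source tree, and run the tests from a different directory so the installed package is what gets imported (the session name and paths here are illustrative):
```python
import os
import shutil

import nox


@nox.session(python=["3.9"])
def packaged_unit(session):
    # Build a fresh wheel and install the built artifact, not the source tree.
    shutil.rmtree("dist", ignore_errors=True)
    session.run("python3", "setup.py", "bdist_wheel")
    session.install(os.path.join("dist", os.listdir("dist").pop()))

    # Run tests from a temp dir so imports resolve to the installed package,
    # which fails if package_data (the discovery documents) is missing.
    root = os.path.dirname(os.path.realpath(__file__))
    tmp = session.create_tmp()
    session.chdir(tmp)
    shutil.copytree(os.path.join(root, "tests"), "tests")
    session.run("py.test", "tests")
```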
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import sys
17
18 import nox
19
20 test_dependencies = [
21 "django>=2.0.0",
22 "google-auth",
23 "google-auth-httplib2",
24 "mox",
25 "parameterized",
26 "pyopenssl",
27 "pytest",
28 "pytest-cov",
29 "webtest",
30 "coverage",
31 "unittest2",
32 "mock",
33 ]
34
35
36 @nox.session(python=["3.7"])
37 def lint(session):
38 session.install("flake8")
39 session.run(
40 "flake8",
41 "googleapiclient",
42 "tests",
43 "--count",
44 "--select=E9,F63,F7,F82",
45 "--show-source",
46 "--statistics",
47 )
48
49
50 @nox.session(python=["3.6", "3.7", "3.8", "3.9"])
51 @nox.parametrize(
52 "oauth2client",
53 [
54 "oauth2client<2dev",
55 "oauth2client>=2,<=3dev",
56 "oauth2client>=3,<=4dev",
57 "oauth2client>=4,<=5dev",
58 ],
59 )
60 def unit(session, oauth2client):
61 session.install(*test_dependencies)
62 session.install(oauth2client)
63 session.install('.')
64
65 # Run py.test against the unit tests.
66 session.run(
67 "py.test",
68 "--quiet",
69 "--cov=googleapiclient",
70 "--cov=tests",
71 "--cov-append",
72 "--cov-config=.coveragerc",
73 "--cov-report=",
74 "--cov-fail-under=85",
75 "tests",
76 *session.posargs,
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -16,6 +16,8 @@
import sys
import nox
+import os
+import shutil
test_dependencies = [
"django>=2.0.0",
@@ -58,9 +60,22 @@
],
)
def unit(session, oauth2client):
+ # Clean up dist and build folders
+ shutil.rmtree('dist', ignore_errors=True)
+ shutil.rmtree('build', ignore_errors=True)
+
session.install(*test_dependencies)
session.install(oauth2client)
- session.install('.')
+
+ # Create and install wheels
+ session.run('python3', 'setup.py', 'bdist_wheel')
+ session.install(os.path.join('dist', os.listdir('dist').pop()))
+
+ # Run tests from a different directory to test the package artifacts
+ root_dir = os.path.dirname(os.path.realpath(__file__))
+ temp_dir = session.create_tmp()
+ session.chdir(temp_dir)
+ shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')
# Run py.test against the unit tests.
session.run(
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -16,6 +16,8 @@\n import sys\n \n import nox\n+import os\n+import shutil\n \n test_dependencies = [\n \"django>=2.0.0\",\n@@ -58,9 +60,22 @@\n ],\n )\n def unit(session, oauth2client):\n+ # Clean up dist and build folders\n+ shutil.rmtree('dist', ignore_errors=True)\n+ shutil.rmtree('build', ignore_errors=True)\n+\n session.install(*test_dependencies)\n session.install(oauth2client)\n- session.install('.')\n+\n+ # Create and install wheels\n+ session.run('python3', 'setup.py', 'bdist_wheel')\n+ session.install(os.path.join('dist', os.listdir('dist').pop()))\n+\n+ # Run tests from a different directory to test the package artifacts\n+ root_dir = os.path.dirname(os.path.realpath(__file__))\n+ temp_dir = session.create_tmp()\n+ session.chdir(temp_dir)\n+ shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')\n \n # Run py.test against the unit tests.\n session.run(\n", "issue": "Modify noxfile to build and test the package\nVersions `2.0.0` and `2.0.1` were yanked from PyPI last week due to an issue where discovery documents were not included in the published package causing `discovery.build()` to fail(#1214). A basic check could be added to verify the package works correctly using the steps in #1214. Ideally it should be done on every PR and push to master so the issue can be caught before the package is published. \r\n\r\nUse these steps from #1214 to re-produce the issue with version `2.0.0` and `2.0.1`:\r\n1. Start with a clean clone of `google-api-python-client`\r\n2. Checkout version `2.0.0` or `2.0.1`, using `git checkout 2.0.0`\r\n3. Run `python setup.py sdist`\r\n4. Run `pip install dist/google-api-python-client-<version>.tar.gz`\r\n5. Run \r\n```\r\n$ python3\r\nPython 3.8.7 (default, Jan 27 2021, 18:44:05) \r\n[GCC 10.2.1 20201224] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from googleapiclient import discovery\r\n>>> client = discovery.build(\"cloudprofiler\", \"v2\")\r\n...\r\n```\r\n\r\nBefore closing this issue, we should ensure that we have checks in place so that a PR will fail if `package_data` [here](https://github.com/googleapis/google-api-python-client/blob/master/setup.py#L78) is empty.\r\n\n", "before_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n 
\"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\nimport os\nimport shutil\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree('dist', ignore_errors=True)\n shutil.rmtree('build', ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run('python3', 'setup.py', 'bdist_wheel')\n session.install(os.path.join('dist', os.listdir('dist').pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n", "path": "noxfile.py"}]} | 1,259 | 267 |
gh_patches_debug_24814 | rasdani/github-patches | git_diff | coala__coala-bears-1276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The bear HaskellLintBear raised an exception
I've used HaskellLintBear to lint https://github.com/wisn/elm-reactor/
Here is the log
https://travis-ci.org/wisn/elm-reactor/builds/180417562
The build result is green, but the bear HaskellLintBear raised an exception.
It seems HaskellLintBear has a problem
```
[WARNING][14:56:00] Bear HaskellLintBear failed to run. Take a look at debug messages (`-V`) for further information.
```
I've collected the traceback information:
```
Traceback (most recent call last):
File "/coala-bears/bears/haskell/HaskellLintBear.py", line 41, in process_output
assert issue['startLine'] == issue['endLine']
AssertionError
File "/coala-bears/bears/haskell/HaskellLintBear.py", line 45, in process_output
newline = line_to_change.replace(issue['from'], issue['to'])
TypeError: Can't convert 'NoneType' object to str implicitly
```
I think `TypeError: Can't convert 'NoneType' object to str implicitly` is the main problem.
Then, followed by `AssertionError`.
Unfortunately, I can't trace manually with `hlint` because my PC freezes when compiling (installing) it. Hope this information will be helpful. Thanks and sorry for my bad English...
--- END ISSUE ---
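The traceback points at two unhandled shapes of hlint output: suggestions whose replacement spans multiple lines (the `AssertionError`) and suggestions with no replacement text at all (the `TypeError` from a `None` value). A defensive sketch of how `process_output` might guard both cases — one possible approach for illustration, not necessarily the project's actual fix:
```python
for issue in output:
    from_text, to_text = issue.get('from'), issue.get('to')
    if from_text is None or to_text is None:
        continue  # no automatic replacement available; skip patching

    from_lines = from_text.splitlines()
    to_lines = to_text.splitlines()
    if len(from_lines) != len(to_lines):
        continue  # cannot map the hint onto lines one-to-one

    # Rewrite only the first affected line, assuming later lines in a
    # multi-line hint are unchanged context.
    line_nr = issue['startLine']
    line_to_change = file[line_nr - 1]
    newline = line_to_change.replace(from_lines[0], to_lines[0])
```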
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/haskell/HaskellLintBear.py`
Content:
```
1 import json
2
3 from coalib.bearlib.abstractions.Linter import linter
4 from dependency_management.requirements.DistributionRequirement import (
5 DistributionRequirement)
6 from coalib.results.Diff import Diff
7 from coalib.results.Result import Result
8 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
9
10
11 @linter(executable='hlint')
12 class HaskellLintBear:
13 """
14 Check Haskell code for possible problems. This bear can propose patches for
15 using alternative functions, simplifying code and removing redundancies.
16
17 See <http://community.haskell.org/~ndm/darcs/hlint/hlint.htm> for more
18 information.
19 """
20
21 LANGUAGES = {'Haskell'}
22 REQUIREMENTS = {DistributionRequirement(apt_get='hlint')}
23 AUTHORS = {'The coala developers'}
24 AUTHORS_EMAILS = {'[email protected]'}
25 LICENSE = 'AGPL-3.0'
26 CAN_DETECT = {'Duplication'}
27 CAN_FIX = {'Unused Code', 'Code Simplification'}
28
29 severity_map = {'Error': RESULT_SEVERITY.MAJOR,
30 'Warning': RESULT_SEVERITY.NORMAL,
31 'Suggestion': RESULT_SEVERITY.INFO}
32
33 @staticmethod
34 def create_arguments(filename, file, config_file):
35 return '--json', filename
36
37 def process_output(self, output, filename, file):
38 output = json.loads(output)
39
40 for issue in output:
41 assert issue['startLine'] == issue['endLine']
42 diff = Diff(file)
43 line_nr = issue['startLine']
44 line_to_change = file[line_nr-1]
45 newline = line_to_change.replace(issue['from'], issue['to'])
46 diff.change_line(line_nr, line_to_change, newline)
47
48 yield Result.from_values(
49 origin=self,
50 message=issue['hint'],
51 file=filename,
52 severity=self.severity_map[issue['severity']],
53 line=issue['startLine'],
54 diffs={filename: diff})
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bears/haskell/HaskellLintBear.py b/bears/haskell/HaskellLintBear.py
--- a/bears/haskell/HaskellLintBear.py
+++ b/bears/haskell/HaskellLintBear.py
@@ -38,11 +38,15 @@
output = json.loads(output)
for issue in output:
- assert issue['startLine'] == issue['endLine']
diff = Diff(file)
+ from_lines = issue['from'].splitlines()
+ to_lines = issue['to'].splitlines()
+ assert len(from_lines) == len(to_lines)
+ for other_lines in range(1, len(from_lines)):
+ assert from_lines[other_lines] == to_lines[other_lines]
line_nr = issue['startLine']
line_to_change = file[line_nr-1]
- newline = line_to_change.replace(issue['from'], issue['to'])
+ newline = line_to_change.replace(from_lines[0], to_lines[0])
diff.change_line(line_nr, line_to_change, newline)
yield Result.from_values(
@@ -51,4 +55,7 @@
file=filename,
severity=self.severity_map[issue['severity']],
line=issue['startLine'],
+ column=issue['startColumn'],
+ end_line=issue['endLine'],
+ end_column=issue['endColumn'],
diffs={filename: diff})
| {"golden_diff": "diff --git a/bears/haskell/HaskellLintBear.py b/bears/haskell/HaskellLintBear.py\n--- a/bears/haskell/HaskellLintBear.py\n+++ b/bears/haskell/HaskellLintBear.py\n@@ -38,11 +38,15 @@\n output = json.loads(output)\n \n for issue in output:\n- assert issue['startLine'] == issue['endLine']\n diff = Diff(file)\n+ from_lines = issue['from'].splitlines()\n+ to_lines = issue['to'].splitlines()\n+ assert len(from_lines) == len(to_lines)\n+ for other_lines in range(1, len(from_lines)):\n+ assert from_lines[other_lines] == to_lines[other_lines]\n line_nr = issue['startLine']\n line_to_change = file[line_nr-1]\n- newline = line_to_change.replace(issue['from'], issue['to'])\n+ newline = line_to_change.replace(from_lines[0], to_lines[0])\n diff.change_line(line_nr, line_to_change, newline)\n \n yield Result.from_values(\n@@ -51,4 +55,7 @@\n file=filename,\n severity=self.severity_map[issue['severity']],\n line=issue['startLine'],\n+ column=issue['startColumn'],\n+ end_line=issue['endLine'],\n+ end_column=issue['endColumn'],\n diffs={filename: diff})\n", "issue": "The bear HaskellLintBear raised an exception\nI've used HaskellLintBear to linting https://github.com/wisn/elm-reactor/\r\n\r\nHere is the log\r\nhttps://travis-ci.org/wisn/elm-reactor/builds/180417562\r\n\r\nThe build result is green, but the bear HaskellLintBear raised an exception.\r\n\r\nIt seems HaskellLintBear have a problem\r\n```\r\n[WARNING][14:56:00] Bear HaskellLintBear failed to run. Take a look at debug messages (`-V`) for further information.\r\n```\r\n\r\nI've collected the traceback information:\r\n```\r\nTraceback (most recent call last):\r\n File \"/coala-bears/bears/haskell/HaskellLintBear.py\", line 41, in process_output\r\n assert issue['startLine'] == issue['endLine']\r\n AssertionError\r\n\r\n File \"/coala-bears/bears/haskell/HaskellLintBear.py\", line 45, in process_output\r\n newline = line_to_change.replace(issue['from'], issue['to'])\r\n TypeError: Can't convert 'NoneType' object to str implicitly\r\n```\r\n\r\nI think `TypeError: Can't convert 'NoneType' object to str implicitly` is the main problem.\r\nThen, followed by `AssertionError`.\r\n\r\nUnfortunately, I can't trace manually with `hlint` because my PC freezes when compiling (in installing) it. Hope this information will be helpful. Thanks and sorry for my bad English...\n", "before_files": [{"content": "import json\n\nfrom coalib.bearlib.abstractions.Linter import linter\nfrom dependency_management.requirements.DistributionRequirement import (\n DistributionRequirement)\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n\n\n@linter(executable='hlint')\nclass HaskellLintBear:\n \"\"\"\n Check Haskell code for possible problems. 
This bear can propose patches for\n using alternative functions, simplifying code and removing redundancies.\n\n See <http://community.haskell.org/~ndm/darcs/hlint/hlint.htm> for more\n information.\n \"\"\"\n\n LANGUAGES = {'Haskell'}\n REQUIREMENTS = {DistributionRequirement(apt_get='hlint')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Duplication'}\n CAN_FIX = {'Unused Code', 'Code Simplification'}\n\n severity_map = {'Error': RESULT_SEVERITY.MAJOR,\n 'Warning': RESULT_SEVERITY.NORMAL,\n 'Suggestion': RESULT_SEVERITY.INFO}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return '--json', filename\n\n def process_output(self, output, filename, file):\n output = json.loads(output)\n\n for issue in output:\n assert issue['startLine'] == issue['endLine']\n diff = Diff(file)\n line_nr = issue['startLine']\n line_to_change = file[line_nr-1]\n newline = line_to_change.replace(issue['from'], issue['to'])\n diff.change_line(line_nr, line_to_change, newline)\n\n yield Result.from_values(\n origin=self,\n message=issue['hint'],\n file=filename,\n severity=self.severity_map[issue['severity']],\n line=issue['startLine'],\n diffs={filename: diff})\n", "path": "bears/haskell/HaskellLintBear.py"}], "after_files": [{"content": "import json\n\nfrom coalib.bearlib.abstractions.Linter import linter\nfrom dependency_management.requirements.DistributionRequirement import (\n DistributionRequirement)\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n\n\n@linter(executable='hlint')\nclass HaskellLintBear:\n \"\"\"\n Check Haskell code for possible problems. This bear can propose patches for\n using alternative functions, simplifying code and removing redundancies.\n\n See <http://community.haskell.org/~ndm/darcs/hlint/hlint.htm> for more\n information.\n \"\"\"\n\n LANGUAGES = {'Haskell'}\n REQUIREMENTS = {DistributionRequirement(apt_get='hlint')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Duplication'}\n CAN_FIX = {'Unused Code', 'Code Simplification'}\n\n severity_map = {'Error': RESULT_SEVERITY.MAJOR,\n 'Warning': RESULT_SEVERITY.NORMAL,\n 'Suggestion': RESULT_SEVERITY.INFO}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return '--json', filename\n\n def process_output(self, output, filename, file):\n output = json.loads(output)\n\n for issue in output:\n diff = Diff(file)\n from_lines = issue['from'].splitlines()\n to_lines = issue['to'].splitlines()\n assert len(from_lines) == len(to_lines)\n for other_lines in range(1, len(from_lines)):\n assert from_lines[other_lines] == to_lines[other_lines]\n line_nr = issue['startLine']\n line_to_change = file[line_nr-1]\n newline = line_to_change.replace(from_lines[0], to_lines[0])\n diff.change_line(line_nr, line_to_change, newline)\n\n yield Result.from_values(\n origin=self,\n message=issue['hint'],\n file=filename,\n severity=self.severity_map[issue['severity']],\n line=issue['startLine'],\n column=issue['startColumn'],\n end_line=issue['endLine'],\n end_column=issue['endColumn'],\n diffs={filename: diff})\n", "path": "bears/haskell/HaskellLintBear.py"}]} | 1,095 | 308 |
gh_patches_debug_1616 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-3193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switching editions changes "shelved" date
**Describe the bug**
When switching editions of a book already on your "To Read" list, the "shelved" date is changed to today's date.
**To Reproduce**
Steps to reproduce the behavior:
1. Pick any book on your "To read" list with more than one edition
2. Pick another edition and switch to this
3. Observe that the book's shelved date is now today
**Expected behavior**
This shouldn't change the shelved date.
**Instance**
https://books.theunseen.city
---
**Desktop (please complete the following information):**
- OS: MacOS 14.1
- Browser: Firefox
- Version: 20.0 (64-bit)
--- END ISSUE ---
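A plausible reading of the bug: when `switch_edition` moves a shelf placement to the new edition, it recreates the `ShelfBook` row without carrying over its shelving timestamp. Assuming the model exposes a `shelved_date` field (an assumption for this sketch, not confirmed by the listing below), the copy would need to preserve it explicitly:
```python
new_shelfbook = models.ShelfBook.objects.create(
    user=shelfbook.user,
    shelf=shelfbook.shelf,
    book=new_edition,
)
# Copy the timestamps from the replaced row after creation, since
# auto-populated date fields can override values passed to create().
new_shelfbook.created_date = shelfbook.created_date
new_shelfbook.shelved_date = shelfbook.shelved_date  # assumed field
new_shelfbook.save(update_fields=["created_date", "shelved_date"])
```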
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/books/editions.py`
Content:
```
1 """ the good stuff! the books! """
2 from functools import reduce
3 import operator
4
5 from django.contrib.auth.decorators import login_required
6 from django.core.paginator import Paginator
7 from django.db import transaction
8 from django.db.models import Q
9 from django.shortcuts import get_object_or_404, redirect
10 from django.template.response import TemplateResponse
11 from django.views import View
12 from django.views.decorators.http import require_POST
13
14 from bookwyrm import forms, models
15 from bookwyrm.activitypub import ActivitypubResponse
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.views.helpers import is_api_request
18
19
20 # pylint: disable=no-self-use
21 class Editions(View):
22 """list of editions"""
23
24 def get(self, request, book_id):
25 """list of editions of a book"""
26 work = get_object_or_404(models.Work, id=book_id)
27
28 if is_api_request(request):
29 return ActivitypubResponse(work.to_edition_list(**request.GET))
30 filters = {}
31
32 if request.GET.get("language"):
33 filters["languages__contains"] = [request.GET.get("language")]
34 if request.GET.get("format"):
35 filters["physical_format__iexact"] = request.GET.get("format")
36
37 editions = work.editions.order_by("-edition_rank")
38 languages = set(sum(editions.values_list("languages", flat=True), []))
39
40 editions = editions.filter(**filters)
41
42 query = request.GET.get("q")
43 if query:
44 searchable_array_fields = ["languages", "publishers"]
45 searchable_fields = [
46 "title",
47 "physical_format",
48 "isbn_10",
49 "isbn_13",
50 "oclc_number",
51 "asin",
52 "aasin",
53 "isfdb",
54 ]
55 search_filter_entries = [
56 {f"{f}__icontains": query} for f in searchable_fields
57 ] + [{f"{f}__iexact": query} for f in searchable_array_fields]
58 editions = editions.filter(
59 reduce(operator.or_, (Q(**f) for f in search_filter_entries))
60 )
61
62 paginated = Paginator(editions, PAGE_LENGTH)
63 page = paginated.get_page(request.GET.get("page"))
64 data = {
65 "editions": page,
66 "page_range": paginated.get_elided_page_range(
67 page.number, on_each_side=2, on_ends=1
68 ),
69 "work": work,
70 "work_form": forms.EditionFromWorkForm(instance=work),
71 "languages": languages,
72 "formats": set(
73 e.physical_format.lower() for e in editions if e.physical_format
74 ),
75 }
76 return TemplateResponse(request, "book/editions/editions.html", data)
77
78
79 @login_required
80 @require_POST
81 @transaction.atomic
82 def switch_edition(request):
83 """switch your copy of a book to a different edition"""
84 edition_id = request.POST.get("edition")
85 new_edition = get_object_or_404(models.Edition, id=edition_id)
86 shelfbooks = models.ShelfBook.objects.filter(
87 book__parent_work=new_edition.parent_work, shelf__user=request.user
88 )
89 for shelfbook in shelfbooks.all():
90 with transaction.atomic():
91 models.ShelfBook.objects.create(
92 created_date=shelfbook.created_date,
93 user=shelfbook.user,
94 shelf=shelfbook.shelf,
95 book=new_edition,
96 )
97 shelfbook.delete()
98
99 readthroughs = models.ReadThrough.objects.filter(
100 book__parent_work=new_edition.parent_work, user=request.user
101 )
102 for readthrough in readthroughs.all():
103 readthrough.book = new_edition
104 readthrough.save()
105
106 return redirect(f"/book/{new_edition.id}")
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/bookwyrm/views/books/editions.py b/bookwyrm/views/books/editions.py
--- a/bookwyrm/views/books/editions.py
+++ b/bookwyrm/views/books/editions.py
@@ -93,6 +93,7 @@
user=shelfbook.user,
shelf=shelfbook.shelf,
book=new_edition,
+ shelved_date=shelfbook.shelved_date,
)
shelfbook.delete()
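The one-line fix localizes the bug precisely: `switch_edition` deletes the old `ShelfBook` row and recreates it, and any field not copied explicitly falls back to its model default, so `shelved_date` was being stamped with the time of the switch. A regression test in the spirit of the fix could look like the sketch below; the fixtures and the `/switch-edition` URL are illustrative assumptions, not taken from bookwyrm's test suite:

```python
from datetime import datetime, timezone

from bookwyrm import models


def test_switch_edition_keeps_shelved_date(client, user, shelf, edition_a, edition_b):
    """Switching editions should carry the original shelved_date across."""
    original = models.ShelfBook.objects.create(
        user=user,
        shelf=shelf,
        book=edition_a,
        shelved_date=datetime(2020, 1, 1, tzinfo=timezone.utc),
    )

    client.force_login(user)
    # The view reads the target edition id from POST, as in the code above.
    client.post("/switch-edition", {"edition": edition_b.id})

    switched = models.ShelfBook.objects.get(shelf=shelf, book=edition_b)
    # Before the patch this fails: shelved_date defaulted to today.
    assert switched.shelved_date == original.shelved_date
```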
| {"golden_diff": "diff --git a/bookwyrm/views/books/editions.py b/bookwyrm/views/books/editions.py\n--- a/bookwyrm/views/books/editions.py\n+++ b/bookwyrm/views/books/editions.py\n@@ -93,6 +93,7 @@\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n+ shelved_date=shelfbook.shelved_date,\n )\n shelfbook.delete()\n", "issue": "Switching editions changes \"shelved\" date\n**Describe the bug**\r\nWhen switching editions of a book already on your \"To Read\" list, the \"shelved\" date is changed to today's date.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Pick any book on your \"To read\" list with more than one edition\r\n2. Pick another edition and switch to this\r\n3. Observe that the book's shelved date is now today\r\n\r\n**Expected behavior**\r\nThis shouldn't changed the shelved date\r\n\r\n**Instance**\r\nhttps://books.theunseen.city\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 14.1\r\n - Browser: Firefox\r\n - Version: 20.0 (64-bit)\r\n\n", "before_files": [{"content": "\"\"\" the good stuff! the books! \"\"\"\nfrom functools import reduce\nimport operator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request\n\n\n# pylint: disable=no-self-use\nclass Editions(View):\n \"\"\"list of editions\"\"\"\n\n def get(self, request, book_id):\n \"\"\"list of editions of a book\"\"\"\n work = get_object_or_404(models.Work, id=book_id)\n\n if is_api_request(request):\n return ActivitypubResponse(work.to_edition_list(**request.GET))\n filters = {}\n\n if request.GET.get(\"language\"):\n filters[\"languages__contains\"] = [request.GET.get(\"language\")]\n if request.GET.get(\"format\"):\n filters[\"physical_format__iexact\"] = request.GET.get(\"format\")\n\n editions = work.editions.order_by(\"-edition_rank\")\n languages = set(sum(editions.values_list(\"languages\", flat=True), []))\n\n editions = editions.filter(**filters)\n\n query = request.GET.get(\"q\")\n if query:\n searchable_array_fields = [\"languages\", \"publishers\"]\n searchable_fields = [\n \"title\",\n \"physical_format\",\n \"isbn_10\",\n \"isbn_13\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n search_filter_entries = [\n {f\"{f}__icontains\": query} for f in searchable_fields\n ] + [{f\"{f}__iexact\": query} for f in searchable_array_fields]\n editions = editions.filter(\n reduce(operator.or_, (Q(**f) for f in search_filter_entries))\n )\n\n paginated = Paginator(editions, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"editions\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n \"work\": work,\n \"work_form\": forms.EditionFromWorkForm(instance=work),\n \"languages\": languages,\n \"formats\": set(\n e.physical_format.lower() for e in editions if e.physical_format\n ),\n }\n return TemplateResponse(request, \"book/editions/editions.html\", data)\n\n\n@login_required\n@require_POST\[email protected]\ndef switch_edition(request):\n \"\"\"switch 
your copy of a book to a different edition\"\"\"\n edition_id = request.POST.get(\"edition\")\n new_edition = get_object_or_404(models.Edition, id=edition_id)\n shelfbooks = models.ShelfBook.objects.filter(\n book__parent_work=new_edition.parent_work, shelf__user=request.user\n )\n for shelfbook in shelfbooks.all():\n with transaction.atomic():\n models.ShelfBook.objects.create(\n created_date=shelfbook.created_date,\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n )\n shelfbook.delete()\n\n readthroughs = models.ReadThrough.objects.filter(\n book__parent_work=new_edition.parent_work, user=request.user\n )\n for readthrough in readthroughs.all():\n readthrough.book = new_edition\n readthrough.save()\n\n return redirect(f\"/book/{new_edition.id}\")\n", "path": "bookwyrm/views/books/editions.py"}], "after_files": [{"content": "\"\"\" the good stuff! the books! \"\"\"\nfrom functools import reduce\nimport operator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request\n\n\n# pylint: disable=no-self-use\nclass Editions(View):\n \"\"\"list of editions\"\"\"\n\n def get(self, request, book_id):\n \"\"\"list of editions of a book\"\"\"\n work = get_object_or_404(models.Work, id=book_id)\n\n if is_api_request(request):\n return ActivitypubResponse(work.to_edition_list(**request.GET))\n filters = {}\n\n if request.GET.get(\"language\"):\n filters[\"languages__contains\"] = [request.GET.get(\"language\")]\n if request.GET.get(\"format\"):\n filters[\"physical_format__iexact\"] = request.GET.get(\"format\")\n\n editions = work.editions.order_by(\"-edition_rank\")\n languages = set(sum(editions.values_list(\"languages\", flat=True), []))\n\n editions = editions.filter(**filters)\n\n query = request.GET.get(\"q\")\n if query:\n searchable_array_fields = [\"languages\", \"publishers\"]\n searchable_fields = [\n \"title\",\n \"physical_format\",\n \"isbn_10\",\n \"isbn_13\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n search_filter_entries = [\n {f\"{f}__icontains\": query} for f in searchable_fields\n ] + [{f\"{f}__iexact\": query} for f in searchable_array_fields]\n editions = editions.filter(\n reduce(operator.or_, (Q(**f) for f in search_filter_entries))\n )\n\n paginated = Paginator(editions, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"editions\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n \"work\": work,\n \"work_form\": forms.EditionFromWorkForm(instance=work),\n \"languages\": languages,\n \"formats\": set(\n e.physical_format.lower() for e in editions if e.physical_format\n ),\n }\n return TemplateResponse(request, \"book/editions/editions.html\", data)\n\n\n@login_required\n@require_POST\[email protected]\ndef switch_edition(request):\n \"\"\"switch your copy of a book to a different edition\"\"\"\n edition_id = request.POST.get(\"edition\")\n new_edition = get_object_or_404(models.Edition, id=edition_id)\n shelfbooks = models.ShelfBook.objects.filter(\n 
book__parent_work=new_edition.parent_work, shelf__user=request.user\n )\n for shelfbook in shelfbooks.all():\n with transaction.atomic():\n models.ShelfBook.objects.create(\n created_date=shelfbook.created_date,\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n shelved_date=shelfbook.shelved_date,\n )\n shelfbook.delete()\n\n readthroughs = models.ReadThrough.objects.filter(\n book__parent_work=new_edition.parent_work, user=request.user\n )\n for readthrough in readthroughs.all():\n readthrough.book = new_edition\n readthrough.save()\n\n return redirect(f\"/book/{new_edition.id}\")\n", "path": "bookwyrm/views/books/editions.py"}]} | 1,443 | 101 |
gh_patches_debug_20183 | rasdani/github-patches | git_diff | saleor__saleor-2826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django 2.1 compatibility
We should switch our supported Django version to the following list:
* Django 1.11 (current LTS)
* Django 2.1 (latest stable)
Current blockers:
* [x] `graphene-django` depends on an old version of `django-filters` (https://github.com/graphql-python/graphene-django/pull/492)
* [x] WeightInput passes floats to its base class which is a DecimalField
* [x] Some form widgets pass `renderer` to functions that don't expect it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/weight.py`
Content:
```
1 """In Saleor we are using 'weight' instead of a 'mass'.
2
3 For those of us who are earth-bound, weight is what we usually experience.
4 Mass is a theoretical construct.
5 Unless we are dealing with inertia and momentum, we are encountering
6 the attractive force between ourselves and the earth,
7 the isolated effects of mass alone being a little more esoteric.
8
9 So even though mass is more fundamental, most people think
10 in terms of weight.
11
12 In the end, it does not really matter unless you travel between
13 different planets.
14 """
15 from decimal import Decimal
16 from enum import Enum
17
18 from django import forms
19 from django.contrib.sites.models import Site
20 from django.core.validators import MinValueValidator
21 from django.template.loader import render_to_string
22 from django.utils.translation import pgettext_lazy
23 from measurement.measures import Weight
24
25
26 class WeightUnits:
27 KILOGRAM = 'kg'
28 POUND = 'lb'
29 OUNCE = 'oz'
30 GRAM = 'g'
31
32 CHOICES = [
33 (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),
34 (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),
35 (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),
36 (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]
37
38
39 WeightUnitsEnum = Enum(
40 'WeightUnitsEnum',
41 {unit: unit for unit in WeightUnits.CHOICES})
42
43
44 def zero_weight():
45 """Function used as a model's default."""
46 return Weight(kg=0)
47
48
49 def convert_weight(weight, unit):
50 # Weight amount from the Weight instance can be retrieved in several units
51 # via its properties, e.g. Weight(lb=10).kg
52 converted_weight = getattr(weight, unit)
53 return Weight(**{unit: converted_weight})
54
55
56 def get_default_weight_unit():
57 site = Site.objects.get_current()
58 return site.settings.default_weight_unit
59
60
61 class WeightInput(forms.TextInput):
62 template = 'dashboard/shipping/weight_widget.html'
63 input_type = 'number'
64
65 def format_value(self, value):
66 if isinstance(value, Weight):
67 unit = get_default_weight_unit()
68 if value.unit != unit:
69 value = convert_weight(value, unit)
70 return value.value
71 return value
72
73 def render(self, name, value, attrs=None):
74 widget = super().render(name, value, attrs=attrs)
75 unit = get_default_weight_unit()
76 translated_unit = dict(WeightUnits.CHOICES)[unit]
77 return render_to_string(
78 self.template,
79 {'widget': widget, 'value': value, 'unit': translated_unit})
80
81
82 class WeightField(forms.DecimalField):
83 def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):
84 if isinstance(widget, type):
85 widget = widget(attrs={'type': 'number', 'step': 'any'})
86 super().__init__(*args, widget=widget, **kwargs)
87 if min_value is not None:
88 self.validators.append(MinValueValidator(min_value))
89
90 def to_python(self, value):
91 value = super().to_python(value)
92 if value is None:
93 return value
94 unit = get_default_weight_unit()
95 return Weight(**{unit: value})
96
97 def validate(self, weight):
98 if weight is None or weight in self.empty_values:
99 super().validate(weight)
100 else:
101 unit = get_default_weight_unit()
102 if not isinstance(weight, Weight):
103 raise Exception(
104 '%r is not a valid weight.' % (weight,))
105 if weight.unit != unit:
106 raise forms.ValidationError(
107 'Invalid unit: %r (expected %r).' % (
108 weight.unit, unit))
109 super().validate(weight.value)
110
111 def clean(self, value):
112 value = value_to_be_validated = self.to_python(value)
113 self.validate(value_to_be_validated)
114 if isinstance(value, Weight):
115 value_to_be_validated = Decimal(value.value)
116 # default decimal validators can be used for Weight's value only
117 self.run_validators(value_to_be_validated)
118 return value
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/saleor/core/weight.py b/saleor/core/weight.py
--- a/saleor/core/weight.py
+++ b/saleor/core/weight.py
@@ -70,8 +70,8 @@
return value.value
return value
- def render(self, name, value, attrs=None):
- widget = super().render(name, value, attrs=attrs)
+ def render(self, name, value, attrs=None, renderer=None):
+ widget = super().render(name, value, attrs=attrs, renderer=renderer)
unit = get_default_weight_unit()
translated_unit = dict(WeightUnits.CHOICES)[unit]
return render_to_string(
@@ -79,7 +79,7 @@
{'widget': widget, 'value': value, 'unit': translated_unit})
-class WeightField(forms.DecimalField):
+class WeightField(forms.FloatField):
def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):
if isinstance(widget, type):
widget = widget(attrs={'type': 'number', 'step': 'any'})
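Both hunks correspond to the unchecked blockers in the issue. Django 1.11 added an optional `renderer` argument to `Widget.render()`, and Django 2.1 removed support for overrides that lack it, so the old three-argument signature fails with a `TypeError` once Django passes the renderer through. Separately, Django 2.1's `forms.DecimalField` rejects the float values a `Weight` carries, hence the move to `forms.FloatField`. The signature half of the fix, isolated from Saleor's template rendering:

```python
from django import forms


class LegacyWidget(forms.TextInput):
    # Fails on Django 2.1+: render() got an unexpected keyword argument 'renderer'.
    def render(self, name, value, attrs=None):
        return super().render(name, value, attrs=attrs)


class PortedWidget(forms.TextInput):
    # Accepts the renderer and forwards it, keeping template-based
    # widget rendering intact on Django 1.11 through 2.1.
    def render(self, name, value, attrs=None, renderer=None):
        return super().render(name, value, attrs=attrs, renderer=renderer)
```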
| {"golden_diff": "diff --git a/saleor/core/weight.py b/saleor/core/weight.py\n--- a/saleor/core/weight.py\n+++ b/saleor/core/weight.py\n@@ -70,8 +70,8 @@\n return value.value\n return value\n \n- def render(self, name, value, attrs=None):\n- widget = super().render(name, value, attrs=attrs)\n+ def render(self, name, value, attrs=None, renderer=None):\n+ widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n@@ -79,7 +79,7 @@\n {'widget': widget, 'value': value, 'unit': translated_unit})\n \n \n-class WeightField(forms.DecimalField):\n+class WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n", "issue": "Django 2.1 compatibility\nWe should switch our supported Django version to the following list:\r\n* Django 1.11 (current LTS)\r\n* Django 2.1 (latest stable)\r\n\r\nCurrent blockers:\r\n* [x] `graphene-django` depends on an old version of `django-filters` (https://github.com/graphql-python/graphene-django/pull/492)\r\n* [x] WeightInput passes floats to its base class which is a DecimalField\r\n* [x] Some form widgets pass `renderer` to functions that don't expect it\n", "before_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\nfrom enum import Enum\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\nWeightUnitsEnum = Enum(\n 'WeightUnitsEnum',\n {unit: unit for unit in WeightUnits.CHOICES})\n\n\ndef zero_weight():\n \"\"\"Function used as a model's default.\"\"\"\n return Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. 
Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None):\n widget = super().render(name, value, attrs=attrs)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.DecimalField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}], "after_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\nfrom enum import Enum\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\nWeightUnitsEnum = Enum(\n 'WeightUnitsEnum',\n {unit: unit for unit in WeightUnits.CHOICES})\n\n\ndef zero_weight():\n \"\"\"Function used as a model's default.\"\"\"\n return 
Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None, renderer=None):\n widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}]} | 1,516 | 242 |
gh_patches_debug_61923 | rasdani/github-patches | git_diff | ray-project__ray-3109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ship Modin with Ray
### Describe the problem
<!-- Describe the problem clearly here. -->
I think it makes sense to ship Modin with Ray. I suggest doing this similarly to how pyarrow is shipped with Ray.
We don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/__init__.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import sys
7
8 if "pyarrow" in sys.modules:
9 raise ImportError("Ray must be imported before pyarrow because Ray "
10 "requires a specific version of pyarrow (which is "
11 "packaged along with Ray).")
12
13 # Add the directory containing pyarrow to the Python path so that we find the
14 # pyarrow version packaged with ray and not a pre-existing pyarrow.
15 pyarrow_path = os.path.join(
16 os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
17 sys.path.insert(0, pyarrow_path)
18
19 # See https://github.com/ray-project/ray/issues/131.
20 helpful_message = """
21
22 If you are using Anaconda, try fixing this problem by running:
23
24 conda install libgcc
25 """
26
27 try:
28 import pyarrow # noqa: F401
29 except ImportError as e:
30 if ((hasattr(e, "msg") and isinstance(e.msg, str)
31 and ("libstdc++" in e.msg or "CXX" in e.msg))):
32 # This code path should be taken with Python 3.
33 e.msg += helpful_message
34 elif (hasattr(e, "message") and isinstance(e.message, str)
35 and ("libstdc++" in e.message or "CXX" in e.message)):
36 # This code path should be taken with Python 2.
37 condition = (hasattr(e, "args") and isinstance(e.args, tuple)
38 and len(e.args) == 1 and isinstance(e.args[0], str))
39 if condition:
40 e.args = (e.args[0] + helpful_message, )
41 else:
42 if not hasattr(e, "args"):
43 e.args = ()
44 elif not isinstance(e.args, tuple):
45 e.args = (e.args, )
46 e.args += (helpful_message, )
47 raise
48
49 from ray.raylet import ObjectID, _config # noqa: E402
50 from ray.profiling import profile # noqa: E402
51 from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
52 remote, get_gpu_ids, get_resource_ids, get_webui_url,
53 register_custom_serializer, shutdown,
54 is_initialized) # noqa: E402
55 from ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,
56 PYTHON_MODE) # noqa: E402
57 from ray.worker import global_state # noqa: E402
58 import ray.internal # noqa: E402
59 # We import ray.actor because some code is run in actor.py which initializes
60 # some functions in the worker.
61 import ray.actor # noqa: F401
62 from ray.actor import method # noqa: E402
63
64 # Ray version string.
65 __version__ = "0.5.3"
66
67 __all__ = [
68 "error_info", "init", "connect", "disconnect", "get", "put", "wait",
69 "remote", "profile", "actor", "method", "get_gpu_ids", "get_resource_ids",
70 "get_webui_url", "register_custom_serializer", "shutdown",
71 "is_initialized", "SCRIPT_MODE", "WORKER_MODE", "LOCAL_MODE",
72 "PYTHON_MODE", "global_state", "ObjectID", "_config", "__version__",
73 "internal"
74 ]
75
76 import ctypes # noqa: E402
77 # Windows only
78 if hasattr(ctypes, "windll"):
79 # Makes sure that all child processes die when we die. Also makes sure that
80 # fatal crashes result in process termination rather than an error dialog
81 # (the latter is annoying since we have a lot of processes). This is done
82 # by associating all child processes with a "job" object that imposes this
83 # behavior.
84 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -46,6 +46,9 @@
e.args += (helpful_message, )
raise
+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
+sys.path.insert(0, modin_path)
+
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
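The patch reuses the vendoring trick already applied to pyarrow at the top of the same module: prepend the bundled directory to `sys.path` before the rest of ray's imports run, so the shipped copy shadows any copy installed in site-packages. A minimal illustration of the shadowing effect — the on-disk layout is an assumption here, not part of the patch:

```python
import os
import sys

# Assumed layout: <package dir>/modin/modin/__init__.py holds the vendored sources.
modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
sys.path.insert(0, modin_path)

import modin  # resolves against modin_path first, not site-packages

# The vendored copy wins even if another modin is installed system-wide.
assert modin.__file__.startswith(modin_path)
```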
| {"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -46,6 +46,9 @@\n e.args += (helpful_message, )\n raise\n \n+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\n+sys.path.insert(0, modin_path)\n+\n from ray.raylet import ObjectID, _config # noqa: E402\n from ray.profiling import profile # noqa: E402\n from ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n", "issue": "Ship Modin with Ray\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nI think it makes sense to ship Modin with Ray. I suggest doing this similar to how pyarrow is shipped with Ray.\r\n\r\nWe don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", 
\"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.insert(0, modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", 
\"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}]} | 1,506 | 154 |
gh_patches_debug_15080 | rasdani/github-patches | git_diff | pulp__pulpcore-5190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix import in wsgi preventing startup
**Version**
Confirmed with Katello folks using 3.49 branch.
**Describe the bug**
We're getting an error during the startup stage:
```python
Starting Pulp API Server...
Traceback (most recent call last):
File "/usr/bin/pulpcore-api", line 33, in <module>
sys.exit(load_entry_point('pulpcore==3.49.1', 'console_scripts', 'pulpcore-api')())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py", line 140, in main
PulpcoreApiApplication(options).run()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 231, in run
super().run()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 72, in run
Arbiter(self).run()
^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/gunicorn/arbiter.py", line 58, in __init__
self.setup(app)
File "/usr/lib/python3.11/site-packages/gunicorn/arbiter.py", line 118, in setup
self.app.wsgi()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py", line 95, in load
import pulpcore.app.wsgi
File "/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py", line 14, in <module>
from pulpcore.app.util import init_domain_metrics_exporter
File "/usr/lib/python3.11/site-packages/pulpcore/app/util.py", line 24, in <module>
from pulpcore.app import models
File "/usr/lib/python3.11/site-packages/pulpcore/app/models/__init__.py", line 4, in <module>
from .base import (
File "/usr/lib/python3.11/site-packages/pulpcore/app/models/base.py", line 3, in <module>
from django.contrib.contenttypes.fields import GenericRelation
File "/usr/lib/python3.11/site-packages/django/contrib/contenttypes/fields.py", line 7, in <module>
from django.contrib.contenttypes.models import ContentType
File "/usr/lib/python3.11/site-packages/django/contrib/contenttypes/models.py", line 139, in <module>
class ContentType(models.Model):
File "/usr/lib/python3.11/site-packages/django/db/models/base.py", line 129, in __new__
app_config = apps.get_containing_app_config(module)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/django/apps/registry.py", line 260, in get_containing_app_config
```
and what caught our eye was this line:
```python
File "/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py", line 14, in <module>
from pulpcore.app.util import init_domain_metrics_exporter
```
Also, there's already a fix for this in the main branch #5178
**To Reproduce**
Installing using pip and rpm packages.
**Expected behavior**
The application should start without issues
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/wsgi.py`
Content:
```
1 """
2 WSGI config for pulp project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
8 """
9
10 from django.core.wsgi import get_wsgi_application
11 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
12
13 from pulpcore.app.entrypoint import using_pulp_api_worker
14 from pulpcore.app.util import init_domain_metrics_exporter
15
16 if not using_pulp_api_worker.get(False):
17 raise RuntimeError("This app must be executed using pulpcore-api entrypoint.")
18
19 application = get_wsgi_application()
20 application = OpenTelemetryMiddleware(application)
21
22 init_domain_metrics_exporter()
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/pulpcore/app/wsgi.py b/pulpcore/app/wsgi.py
--- a/pulpcore/app/wsgi.py
+++ b/pulpcore/app/wsgi.py
@@ -11,7 +11,6 @@
from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
from pulpcore.app.entrypoint import using_pulp_api_worker
-from pulpcore.app.util import init_domain_metrics_exporter
if not using_pulp_api_worker.get(False):
raise RuntimeError("This app must be executed using pulpcore-api entrypoint.")
@@ -19,4 +18,6 @@
application = get_wsgi_application()
application = OpenTelemetryMiddleware(application)
+from pulpcore.app.util import init_domain_metrics_exporter # noqa: E402
+
init_domain_metrics_exporter()
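The traceback bottoms out in Django's app registry: `pulpcore.app.util` imports `pulpcore.app.models` at module scope, and on the 3.49 branch that happened before `get_wsgi_application()` had run `django.setup()`, so the model classes were built against an unpopulated registry. The patch keeps the import but moves it below the application construction, with `noqa: E402` silencing the mid-module-import lint warning. The ordering the fixed module relies on, stripped to its essentials:

```python
from django.core.wsgi import get_wsgi_application

# get_wsgi_application() performs django.setup(), which populates the app
# registry; importing model-touching modules is only safe after this point.
application = get_wsgi_application()

from pulpcore.app.util import init_domain_metrics_exporter  # noqa: E402

init_domain_metrics_exporter()
```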
| {"golden_diff": "diff --git a/pulpcore/app/wsgi.py b/pulpcore/app/wsgi.py\n--- a/pulpcore/app/wsgi.py\n+++ b/pulpcore/app/wsgi.py\n@@ -11,7 +11,6 @@\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n \n from pulpcore.app.entrypoint import using_pulp_api_worker\n-from pulpcore.app.util import init_domain_metrics_exporter\n \n if not using_pulp_api_worker.get(False):\n raise RuntimeError(\"This app must be executed using pulpcore-api entrypoint.\")\n@@ -19,4 +18,6 @@\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n \n+from pulpcore.app.util import init_domain_metrics_exporter # noqa: E402\n+\n init_domain_metrics_exporter()\n", "issue": "Fix import in wsgi preventing startup\n**Version**\r\nConfirmed with Katello folks using 3.49 branch.\r\n\r\n**Describe the bug**\r\nWe're getting an error during the startup stage:\r\n```python\r\nStarting Pulp API Server...\r\nTraceback (most recent call last):\r\n File \"/usr/bin/pulpcore-api\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.49.1', 'console_scripts', 'pulpcore-api')())\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n ^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py\", line 140, in main\r\n PulpcoreApiApplication(options).run()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 231, in run\r\n super().run()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 72, in run\r\n Arbiter(self).run()\r\n ^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/arbiter.py\", line 58, in __init__\r\n self.setup(app)\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/arbiter.py\", line 118, in setup\r\n self.app.wsgi()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n self.callable = self.load()\r\n ^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py\", line 95, in load\r\n import pulpcore.app.wsgi\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py\", line 14, in <module>\r\n from pulpcore.app.util import init_domain_metrics_exporter\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/util.py\", line 24, in <module>\r\n from pulpcore.app import models\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/models/__init__.py\", line 4, in <module>\r\n from .base import (\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/models/base.py\", line 3, in <module>\r\n from django.contrib.contenttypes.fields import GenericRelation\r\n File \"/usr/lib/python3.11/site-packages/django/contrib/contenttypes/fields.py\", line 7, in <module>\r\n from django.contrib.contenttypes.models import ContentType\r\n File \"/usr/lib/python3.11/site-packages/django/contrib/contenttypes/models.py\", line 139, in <module>\r\n class ContentType(models.Model):\r\n File 
\"/usr/lib/python3.11/site-packages/django/db/models/base.py\", line 129, in __new__\r\n app_config = apps.get_containing_app_config(module)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/django/apps/registry.py\", line 260, in get_containing_app_config\r\n```\r\n\r\nand what got our eye was this line:\r\n```python\r\nFile \"/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py\", line 14, in <module>\r\n from pulpcore.app.util import init_domain_metrics_exporter\r\n```\r\n\r\nAlso, there's already a fix for this in the main branch #5178\r\n\r\n**To Reproduce**\r\nInstalling using pip and rpm packages.\r\n\r\n**Expected behavior**\r\nThe application should start without issues\r\n\n", "before_files": [{"content": "\"\"\"\nWSGI config for pulp project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/\n\"\"\"\n\nfrom django.core.wsgi import get_wsgi_application\nfrom opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\nfrom pulpcore.app.entrypoint import using_pulp_api_worker\nfrom pulpcore.app.util import init_domain_metrics_exporter\n\nif not using_pulp_api_worker.get(False):\n raise RuntimeError(\"This app must be executed using pulpcore-api entrypoint.\")\n\napplication = get_wsgi_application()\napplication = OpenTelemetryMiddleware(application)\n\ninit_domain_metrics_exporter()\n", "path": "pulpcore/app/wsgi.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for pulp project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/\n\"\"\"\n\nfrom django.core.wsgi import get_wsgi_application\nfrom opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\nfrom pulpcore.app.entrypoint import using_pulp_api_worker\n\nif not using_pulp_api_worker.get(False):\n raise RuntimeError(\"This app must be executed using pulpcore-api entrypoint.\")\n\napplication = get_wsgi_application()\napplication = OpenTelemetryMiddleware(application)\n\nfrom pulpcore.app.util import init_domain_metrics_exporter # noqa: E402\n\ninit_domain_metrics_exporter()\n", "path": "pulpcore/app/wsgi.py"}]} | 1,439 | 173 |
gh_patches_debug_24448 | rasdani/github-patches | git_diff | conan-io__conan-center-index-9862 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Package]OpenSUSE Tumbleweed fix problem - glu/system
https://github.com/conan-io/conan-center-index/blob/8658ae021ce225d889fa4ee38d30cb80877a7c75/recipes/glu/all/conanfile.py#L17-L32
This fixes the problem in openSUSE Tumbleweed:
```
elif tools.os_info.with_zypper:
packages = ["glu-devel"]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/glu/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools
2 from conans.errors import ConanException
3 import os
4
5
6 class SysConfigGLUConan(ConanFile):
7 name = "glu"
8 version = "system"
9 description = "cross-platform virtual conan package for the GLU support"
10 topics = ("conan", "opengl", "glu")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://cgit.freedesktop.org/mesa/glu/"
13 license = "SGI-B-2.0"
14 settings = "os"
15 requires = "opengl/system"
16
17 def system_requirements(self):
18 packages = []
19 if tools.os_info.is_linux and self.settings.os == "Linux":
20 if tools.os_info.with_yum or tools.os_info.with_dnf:
21 packages = ["mesa-libGLU-devel"]
22 elif tools.os_info.with_apt:
23 packages = ["libglu1-mesa-dev"]
24 elif tools.os_info.with_pacman:
25 packages = ["glu"]
26 elif tools.os_info.with_zypper:
27 packages = ["Mesa-libGLU-devel"]
28 else:
29 self.output.warn("Don't know how to install GLU for your distro")
30 if tools.os_info.is_freebsd and self.settings.os == "FreeBSD":
31 packages = ["libGLU"]
32 if packages:
33 package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')
34 for p in packages:
35 package_tool.install(update=True, packages=p)
36
37 def _fill_cppinfo_from_pkgconfig(self, name):
38 pkg_config = tools.PkgConfig(name)
39 if not pkg_config.provides:
40 raise ConanException("GLU development files aren't available, giving up")
41 libs = [lib[2:] for lib in pkg_config.libs_only_l]
42 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
43 ldflags = [flag for flag in pkg_config.libs_only_other]
44 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
45 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
46 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
47
48 self.cpp_info.system_libs.extend(libs)
49 self.cpp_info.libdirs.extend(lib_dirs)
50 self.cpp_info.sharedlinkflags.extend(ldflags)
51 self.cpp_info.exelinkflags.extend(ldflags)
52 self.cpp_info.defines.extend(defines)
53 self.cpp_info.includedirs.extend(include_dirs)
54 self.cpp_info.cflags.extend(cflags)
55 self.cpp_info.cxxflags.extend(cflags)
56
57 def package_info(self):
58 self.cpp_info.includedirs = []
59 self.cpp_info.libdirs = []
60
61 if self.settings.os == "Windows":
62 self.cpp_info.system_libs = ["Glu32"]
63 elif self.settings.os in ["Linux", "FreeBSD"]:
64 self._fill_cppinfo_from_pkgconfig("glu")
65
66 def package_id(self):
67 self.info.header_only()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/recipes/glu/all/conanfile.py b/recipes/glu/all/conanfile.py
--- a/recipes/glu/all/conanfile.py
+++ b/recipes/glu/all/conanfile.py
@@ -1,13 +1,12 @@
from conans import ConanFile, tools
from conans.errors import ConanException
-import os
class SysConfigGLUConan(ConanFile):
name = "glu"
version = "system"
description = "cross-platform virtual conan package for the GLU support"
- topics = ("conan", "opengl", "glu")
+ topics = ("opengl", "glu")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://cgit.freedesktop.org/mesa/glu/"
license = "SGI-B-2.0"
@@ -24,7 +23,7 @@
elif tools.os_info.with_pacman:
packages = ["glu"]
elif tools.os_info.with_zypper:
- packages = ["Mesa-libGLU-devel"]
+ packages = ["glu-devel"]
else:
self.output.warn("Don't know how to install GLU for your distro")
if tools.os_info.is_freebsd and self.settings.os == "FreeBSD":
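openSUSE Tumbleweed publishes the GLU development files as `glu-devel`, so the old `Mesa-libGLU-devel` lookup failed there; the patch swaps the zypper package name and, in passing, drops the unused `os` import and the redundant `conan` topic. Roughly the zypper branch the recipe executes after the fix — in the real recipe this runs inside `system_requirements()` with `conanfile=self` and `default_mode='verify'`, omitted here for brevity:

```python
from conans import tools

if tools.os_info.with_zypper:
    # Tumbleweed package name after the fix; was "Mesa-libGLU-devel".
    package_tool = tools.SystemPackageTool()
    package_tool.install(update=True, packages="glu-devel")
```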
| {"golden_diff": "diff --git a/recipes/glu/all/conanfile.py b/recipes/glu/all/conanfile.py\n--- a/recipes/glu/all/conanfile.py\n+++ b/recipes/glu/all/conanfile.py\n@@ -1,13 +1,12 @@\n from conans import ConanFile, tools\n from conans.errors import ConanException\n-import os\n \n \n class SysConfigGLUConan(ConanFile):\n name = \"glu\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the GLU support\"\n- topics = (\"conan\", \"opengl\", \"glu\")\n+ topics = (\"opengl\", \"glu\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgit.freedesktop.org/mesa/glu/\"\n license = \"SGI-B-2.0\"\n@@ -24,7 +23,7 @@\n elif tools.os_info.with_pacman:\n packages = [\"glu\"]\n elif tools.os_info.with_zypper:\n- packages = [\"Mesa-libGLU-devel\"]\n+ packages = [\"glu-devel\"]\n else:\n self.output.warn(\"Don't know how to install GLU for your distro\")\n if tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n", "issue": "[Package]OpenSUSE Tumbleweed fix problem - glu/system\nhttps://github.com/conan-io/conan-center-index/blob/8658ae021ce225d889fa4ee38d30cb80877a7c75/recipes/glu/all/conanfile.py#L17-L32\r\n\r\nThis fix the problem in openSUSE Tumbleweed:\r\n```\r\nelif tools.os_info.with_zypper:\r\n packages = [\"glu-devel\"]\r\n```\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException\nimport os\n\n\nclass SysConfigGLUConan(ConanFile):\n name = \"glu\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the GLU support\"\n topics = (\"conan\", \"opengl\", \"glu\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgit.freedesktop.org/mesa/glu/\"\n license = \"SGI-B-2.0\"\n settings = \"os\"\n requires = \"opengl/system\"\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_yum or tools.os_info.with_dnf:\n packages = [\"mesa-libGLU-devel\"]\n elif tools.os_info.with_apt:\n packages = [\"libglu1-mesa-dev\"]\n elif tools.os_info.with_pacman:\n packages = [\"glu\"]\n elif tools.os_info.with_zypper:\n packages = [\"Mesa-libGLU-devel\"]\n else:\n self.output.warn(\"Don't know how to install GLU for your distro\")\n if tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libGLU\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')\n for p in packages:\n package_tool.install(update=True, packages=p)\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"GLU development files aren't available, giving up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.system_libs.extend(libs)\n self.cpp_info.libdirs.extend(lib_dirs)\n self.cpp_info.sharedlinkflags.extend(ldflags)\n self.cpp_info.exelinkflags.extend(ldflags)\n self.cpp_info.defines.extend(defines)\n self.cpp_info.includedirs.extend(include_dirs)\n self.cpp_info.cflags.extend(cflags)\n self.cpp_info.cxxflags.extend(cflags)\n\n def package_info(self):\n 
self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"Glu32\"]\n elif self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self._fill_cppinfo_from_pkgconfig(\"glu\")\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/glu/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException\n\n\nclass SysConfigGLUConan(ConanFile):\n name = \"glu\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the GLU support\"\n topics = (\"opengl\", \"glu\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgit.freedesktop.org/mesa/glu/\"\n license = \"SGI-B-2.0\"\n settings = \"os\"\n requires = \"opengl/system\"\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_yum or tools.os_info.with_dnf:\n packages = [\"mesa-libGLU-devel\"]\n elif tools.os_info.with_apt:\n packages = [\"libglu1-mesa-dev\"]\n elif tools.os_info.with_pacman:\n packages = [\"glu\"]\n elif tools.os_info.with_zypper:\n packages = [\"glu-devel\"]\n else:\n self.output.warn(\"Don't know how to install GLU for your distro\")\n if tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libGLU\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')\n for p in packages:\n package_tool.install(update=True, packages=p)\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"GLU development files aren't available, giving up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.system_libs.extend(libs)\n self.cpp_info.libdirs.extend(lib_dirs)\n self.cpp_info.sharedlinkflags.extend(ldflags)\n self.cpp_info.exelinkflags.extend(ldflags)\n self.cpp_info.defines.extend(defines)\n self.cpp_info.includedirs.extend(include_dirs)\n self.cpp_info.cflags.extend(cflags)\n self.cpp_info.cxxflags.extend(cflags)\n\n def package_info(self):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"Glu32\"]\n elif self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self._fill_cppinfo_from_pkgconfig(\"glu\")\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/glu/all/conanfile.py"}]} | 1,170 | 292 |
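
For reference, the core of the fix in this record is a distro-to-package mapping: the recipe picks the GLU development package from the detected package manager, and the zypper entry is corrected from `Mesa-libGLU-devel` to `glu-devel` for openSUSE Tumbleweed, as the issue suggests. A minimal standalone sketch of that lookup (the `GLU_PACKAGES` table and `pick_glu_packages` helper are illustrative names, not Conan API):

```python
# Hypothetical standalone lookup mirroring the recipe's branching.
# Package names for yum/dnf, apt and pacman come from the recipe above;
# the zypper entry reflects the fix ("glu-devel", not "Mesa-libGLU-devel").

GLU_PACKAGES = {
    "yum": ["mesa-libGLU-devel"],
    "dnf": ["mesa-libGLU-devel"],
    "apt": ["libglu1-mesa-dev"],
    "pacman": ["glu"],
    "zypper": ["glu-devel"],  # openSUSE (incl. Tumbleweed)
}

def pick_glu_packages(package_manager):
    """Return the system packages providing GLU headers, or [] if unknown."""
    return GLU_PACKAGES.get(package_manager, [])

if __name__ == "__main__":
    for pm in ("apt", "zypper", "apk"):
        print(pm, "->", pick_glu_packages(pm) or "unsupported")
```
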
gh_patches_debug_15440 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3372 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Google Maps API requests should use an API key
A `For development purposes only` watermark is being shown on our maps because Google has made it mandatory to use an API key to talk to the Maps API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/context_processors.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please see
7 < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import re
11 import django
12
13 from django.conf import settings
14 from django.core.exceptions import DisallowedHost
15 from django.contrib.sites.models import get_current_site
16
17
18 def extra_context(request, protocol="http"):
19 """Add information to the request context."""
20 try:
21 current_site = get_current_site(request)
22 except DisallowedHost:
23 current_site = None
24
25 django_version = django.get_version()
26 debug = getattr(settings, 'DEBUG', False)
27 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
28 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
29 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
30 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
31 sentry_dsn = get_sentry_dsn(settings)
32
33 return dict(
34 current_site=current_site,
35 django_version=django_version,
36 debug=debug,
37 deploy_tag=deploy_tag,
38 deploy_branch=deploy_branch,
39 deploy_commit_id=deploy_commit_id,
40 deploy_commit_full_id=deploy_commit_full_id,
41 sentry_dsn=sentry_dsn,
42 )
43
44
45 def get_sentry_dsn(settings):
46 sentry_dsn = getattr(settings, 'RAVEN_CONFIG', {}).get('dsn', '')
47 sentry_dsn = re.sub('(:\w*?)@', '@', sentry_dsn)
48 # Always use https!
49 sentry_dsn = sentry_dsn.replace('http://', 'https://')
50 return sentry_dsn
51
52
53 def get_current_path_without_lang(request):
54 """Return current path without lang."""
55 path = request.get_full_path()
56 path_bits = path.split('/')
57 path = '/'.join(path_bits[2:])
58 return {'current_path_without_lang': path}
59
60
61 def extra_pages_context(request):
62 """Add context information of an RSR Page."""
63 if request.rsr_page:
64 page = request.rsr_page
65 return {
66 'rsr_page': page,
67 'favicon': page.favicon,
68 'logo': page.logo,
69 'organisation': page.organisation,
70 'return_url': page.return_url,
71 'return_url_text': page.custom_return_url_text,
72 'page_stylesheet': page.stylesheet,
73 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
74 'domain_url': '//{}'.format(settings.RSR_DOMAIN),
75 'no_facebook': not page.facebook_button,
76 'facebook_app_id': page.facebook_app_id,
77 'no_twitter': not page.twitter_button,
78 }
79
80 return {}
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py
--- a/akvo/rsr/context_processors.py
+++ b/akvo/rsr/context_processors.py
@@ -29,6 +29,7 @@
deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
sentry_dsn = get_sentry_dsn(settings)
+ gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')
return dict(
current_site=current_site,
@@ -39,6 +40,7 @@
deploy_commit_id=deploy_commit_id,
deploy_commit_full_id=deploy_commit_full_id,
sentry_dsn=sentry_dsn,
+ gmaps_api_key=gmaps_api_key,
)
| {"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -29,6 +29,7 @@\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n sentry_dsn = get_sentry_dsn(settings)\n+ gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')\n \n return dict(\n current_site=current_site,\n@@ -39,6 +40,7 @@\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id,\n sentry_dsn=sentry_dsn,\n+ gmaps_api_key=gmaps_api_key,\n )\n", "issue": "Google maps API requests should use an API key\n`For development purposes only` watermark is being shown on our maps as Google has made it mandatory to use an API key to talk to the maps API. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport re\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n sentry_dsn = get_sentry_dsn(settings)\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id,\n sentry_dsn=sentry_dsn,\n )\n\n\ndef get_sentry_dsn(settings):\n sentry_dsn = getattr(settings, 'RAVEN_CONFIG', {}).get('dsn', '')\n sentry_dsn = re.sub('(:\\w*?)@', '@', sentry_dsn)\n # Always use https!\n sentry_dsn = sentry_dsn.replace('http://', 'https://')\n return sentry_dsn\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more 
details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport re\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n sentry_dsn = get_sentry_dsn(settings)\n gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id,\n sentry_dsn=sentry_dsn,\n gmaps_api_key=gmaps_api_key,\n )\n\n\ndef get_sentry_dsn(settings):\n sentry_dsn = getattr(settings, 'RAVEN_CONFIG', {}).get('dsn', '')\n sentry_dsn = re.sub('(:\\w*?)@', '@', sentry_dsn)\n # Always use https!\n sentry_dsn = sentry_dsn.replace('http://', 'https://')\n return sentry_dsn\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]} | 1,076 | 194 |
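
The patch in this record boils down to one defensive lookup: read an optional `GOOGLE_MAPS_API_KEY` from Django settings with `getattr` and a sentinel default, then expose it to templates through the context processor. A minimal sketch, using a `SimpleNamespace` stand-in for `django.conf.settings` (the `gmaps_context` helper is illustrative, not akvo-rsr code):

```python
# Read an optional Google Maps key from Django-style settings, falling
# back to a sentinel so templates can detect a missing key. FakeSettings
# via SimpleNamespace is only for the demo; in Django you would pass
# django.conf.settings.

from types import SimpleNamespace

def gmaps_context(settings):
    return {"gmaps_api_key": getattr(settings, "GOOGLE_MAPS_API_KEY", "NO_API_KEY")}

if __name__ == "__main__":
    configured = SimpleNamespace(GOOGLE_MAPS_API_KEY="AIza-example")  # fake key
    missing = SimpleNamespace()
    print(gmaps_context(configured))  # {'gmaps_api_key': 'AIza-example'}
    print(gmaps_context(missing))     # {'gmaps_api_key': 'NO_API_KEY'}
```
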
gh_patches_debug_30947 | rasdani/github-patches | git_diff | allegro__ralph-1541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ubuntu package
We should build Ubuntu packages for ralph (without most of the scan plugins) so it can be installed easily by anyone reluctant to use Docker.
- all JS and components integrated into the package
- /etc/ralph for system configuration
- only Ubuntu supported
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ralph/__main__.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5
6 def main(settings_module='ralph.settings'):
7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)
8
9 from django.core.management import execute_from_command_line
10
11 execute_from_command_line(sys.argv)
12
13
14 def dev():
15 main('ralph.settings.dev')
16
17
18 def test():
19 main('ralph.settings.test')
20
21
22 if __name__ == '__main__':
23 main()
24
```
Path: `setup.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2
3 import os
4 import sys
5 from setuptools import setup, find_packages
6
7 assert sys.version_info >= (3, 3), 'Python 3.3+ required.'
8
9
10 def read(fname):
11 return open(os.path.join(os.path.dirname(__file__), fname)).read()
12
13 setup(
14 name='ralph',
15 version='3.0.0', # TODO: import from ralph
16 author='Grupa Allegro Sp. z o.o. and Contributors',
17 author_email='[email protected]',
18 description="Advanced Asset Management and DCIM system for data center and back office.",
19 long_description='\n'.join([read('README.md'), read('CHANGES')]),
20 url='http://ralph.allegrogroup.com/',
21 keywords='',
22 platforms=['any'],
23 license='Apache Software License v2.0',
24 packages=find_packages('src'), # TODO: remove src intermediate directory
25 include_package_data=True,
26 package_dir={'': 'src'},
27 zip_safe=False, # because templates are loaded from file path
28 entry_points={
29 'console_scripts': [
30 'ralph = ralph.__main__:main',
31 'dev_ralph = ralph.__main__:dev',
32 'test_ralph = ralph.__main__:test',
33 ],
34 },
35 classifiers=[
36 'Development Status :: 4 - Beta',
37 'Framework :: Django',
38 'Intended Audience :: System Administrators',
39 'Intended Audience :: Information Technology',
40 'License :: OSI Approved :: Apache Software License',
41 'Natural Language :: English',
42 'Operating System :: POSIX',
43 'Operating System :: MacOS :: MacOS X',
44 'Operating System :: Microsoft :: Windows :: Windows NT/2000',
45 'Programming Language :: Python',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Topic :: Internet :: WWW/HTTP',
49 ]
50 )
51
```
Path: `src/ralph/settings/prod.py`
Content:
```
1 from ralph.settings import * # noqa
2
3 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # noqa
4 STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')
5
6 LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: "user, person
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
setup(
name='ralph',
- version='3.0.0', # TODO: import from ralph
+ version=read('./VERSION').strip(),
author='Grupa Allegro Sp. z o.o. and Contributors',
author_email='[email protected]',
description="Advanced Asset Management and DCIM system for data center and back office.",
@@ -21,13 +21,13 @@
keywords='',
platforms=['any'],
license='Apache Software License v2.0',
- packages=find_packages('src'), # TODO: remove src intermediate directory
+ packages=find_packages('src'),
include_package_data=True,
package_dir={'': 'src'},
zip_safe=False, # because templates are loaded from file path
entry_points={
'console_scripts': [
- 'ralph = ralph.__main__:main',
+ 'ralph = ralph.__main__:prod',
'dev_ralph = ralph.__main__:dev',
'test_ralph = ralph.__main__:test',
],
diff --git a/src/ralph/__main__.py b/src/ralph/__main__.py
--- a/src/ralph/__main__.py
+++ b/src/ralph/__main__.py
@@ -19,5 +19,9 @@
main('ralph.settings.test')
+def prod():
+ main('ralph.settings.prod')
+
+
if __name__ == '__main__':
- main()
+ main('ralph.settings.prod')
diff --git a/src/ralph/settings/prod.py b/src/ralph/settings/prod.py
--- a/src/ralph/settings/prod.py
+++ b/src/ralph/settings/prod.py
@@ -4,3 +4,7 @@
STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')
LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: "user, person
+
+# FIXME: when going for full production, change it to False
+
+DEBUG = True
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n setup(\n name='ralph',\n- version='3.0.0', # TODO: import from ralph\n+ version=read('./VERSION').strip(),\n author='Grupa Allegro Sp. z o.o. and Contributors',\n author_email='[email protected]',\n description=\"Advanced Asset Management and DCIM system for data center and back office.\",\n@@ -21,13 +21,13 @@\n keywords='',\n platforms=['any'],\n license='Apache Software License v2.0',\n- packages=find_packages('src'), # TODO: remove src intermediate directory\n+ packages=find_packages('src'),\n include_package_data=True,\n package_dir={'': 'src'},\n zip_safe=False, # because templates are loaded from file path\n entry_points={\n 'console_scripts': [\n- 'ralph = ralph.__main__:main',\n+ 'ralph = ralph.__main__:prod',\n 'dev_ralph = ralph.__main__:dev',\n 'test_ralph = ralph.__main__:test',\n ],\ndiff --git a/src/ralph/__main__.py b/src/ralph/__main__.py\n--- a/src/ralph/__main__.py\n+++ b/src/ralph/__main__.py\n@@ -19,5 +19,9 @@\n main('ralph.settings.test')\n \n \n+def prod():\n+ main('ralph.settings.prod')\n+\n+\n if __name__ == '__main__':\n- main()\n+ main('ralph.settings.prod')\ndiff --git a/src/ralph/settings/prod.py b/src/ralph/settings/prod.py\n--- a/src/ralph/settings/prod.py\n+++ b/src/ralph/settings/prod.py\n@@ -4,3 +4,7 @@\n STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')\n \n LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: \"user, person\n+\n+# FIXME: when going for full production, change it to False\n+\n+DEBUG = True\n", "issue": "Ubuntu package\nWe should build ubuntu packages for ralph (without much of scan plugins) to be able to install easily if you're reluctant to use docker.\n- all js and components integrated into the package\n- /etc/ralph for system configuration\n- only ubuntu supported\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\ndef main(settings_module='ralph.settings'):\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n\n\ndef dev():\n main('ralph.settings.dev')\n\n\ndef test():\n main('ralph.settings.test')\n\n\nif __name__ == '__main__':\n main()\n", "path": "src/ralph/__main__.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nassert sys.version_info >= (3, 3), 'Python 3.3+ required.'\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='ralph',\n version='3.0.0', # TODO: import from ralph\n author='Grupa Allegro Sp. z o.o. 
and Contributors',\n author_email='[email protected]',\n description=\"Advanced Asset Management and DCIM system for data center and back office.\",\n long_description='\\n'.join([read('README.md'), read('CHANGES')]),\n url='http://ralph.allegrogroup.com/',\n keywords='',\n platforms=['any'],\n license='Apache Software License v2.0',\n packages=find_packages('src'), # TODO: remove src intermediate directory\n include_package_data=True,\n package_dir={'': 'src'},\n zip_safe=False, # because templates are loaded from file path\n entry_points={\n 'console_scripts': [\n 'ralph = ralph.__main__:main',\n 'dev_ralph = ralph.__main__:dev',\n 'test_ralph = ralph.__main__:test',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows NT/2000',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet :: WWW/HTTP',\n ]\n)\n", "path": "setup.py"}, {"content": "from ralph.settings import * # noqa\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # noqa\nSTATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')\n\nLDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: \"user, person\n", "path": "src/ralph/settings/prod.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\ndef main(settings_module='ralph.settings'):\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n\n\ndef dev():\n main('ralph.settings.dev')\n\n\ndef test():\n main('ralph.settings.test')\n\n\ndef prod():\n main('ralph.settings.prod')\n\n\nif __name__ == '__main__':\n main('ralph.settings.prod')\n", "path": "src/ralph/__main__.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nassert sys.version_info >= (3, 3), 'Python 3.3+ required.'\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='ralph',\n version=read('./VERSION').strip(),\n author='Grupa Allegro Sp. z o.o. 
and Contributors',\n author_email='[email protected]',\n description=\"Advanced Asset Management and DCIM system for data center and back office.\",\n long_description='\\n'.join([read('README.md'), read('CHANGES')]),\n url='http://ralph.allegrogroup.com/',\n keywords='',\n platforms=['any'],\n license='Apache Software License v2.0',\n packages=find_packages('src'),\n include_package_data=True,\n package_dir={'': 'src'},\n zip_safe=False, # because templates are loaded from file path\n entry_points={\n 'console_scripts': [\n 'ralph = ralph.__main__:prod',\n 'dev_ralph = ralph.__main__:dev',\n 'test_ralph = ralph.__main__:test',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows NT/2000',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet :: WWW/HTTP',\n ]\n)\n", "path": "setup.py"}, {"content": "from ralph.settings import * # noqa\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # noqa\nSTATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')\n\nLDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: \"user, person\n\n# FIXME: when going for full production, change it to False\n\nDEBUG = True\n", "path": "src/ralph/settings/prod.py"}]} | 1,070 | 476 |
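
The packaging change in this record rests on a simple entry-point pattern: every console script is a thin wrapper that pins `DJANGO_SETTINGS_MODULE` before delegating to Django's command runner, so the packaged `ralph` command can default to production settings while `dev_ralph` and `test_ralph` keep theirs, and the version string is read from a `VERSION` file instead of being hard-coded. A sketch of the dispatch, with `run_command` as a stub for `execute_from_command_line` so it runs without Django installed:

```python
import os
import sys

def run_command(argv):
    # Stub standing in for django.core.management.execute_from_command_line.
    print("would run", argv, "with settings", os.environ["DJANGO_SETTINGS_MODULE"])

def main(settings_module="ralph.settings"):
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
    run_command(sys.argv)

def dev():
    main("ralph.settings.dev")

def test():
    main("ralph.settings.test")

def prod():
    # What the packaged `ralph` console script now points at.
    main("ralph.settings.prod")

if __name__ == "__main__":
    prod()
```
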
gh_patches_debug_39660 | rasdani/github-patches | git_diff | streamlink__streamlink-141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Euronews plugin broken
I dug up the EuroNews plugin, which has been broken since December 2014.
https://github.com/chrippa/livestreamer/issues/626
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/euronews.py`
Content:
```
1 import re
2
3 from itertools import chain
4
5 from streamlink.compat import urlparse
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import http
8 from streamlink.stream import HLSStream, HTTPStream
9
10 from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer
11
12 _url_re = re.compile("http(s)?://(\w+\.)?euronews.com")
13
14
15 class Euronews(Plugin):
16 @classmethod
17 def can_handle_url(self, url):
18 return _url_re.match(url)
19
20 def _create_stream(self, source):
21 url = source["file"]
22
23 if urlparse(url).path.endswith("m3u8"):
24 streams = HLSStream.parse_variant_playlist(self.session, url)
25
26 # TODO: Replace with "yield from" when dropping Python 2.
27 for stream in streams.items():
28 yield stream
29 else:
30 name = source.get("label", "vod")
31 yield name, HTTPStream(self.session, url)
32
33 def _get_streams(self):
34 res = http.get(self.url)
35 playlist = jwplayer.parse_playlist(res)
36 if not playlist:
37 return
38
39 for item in playlist:
40 streams = map(self._create_stream, item["sources"])
41
42 # TODO: Replace with "yield from" when dropping Python 2.
43 for stream in chain.from_iterable(streams):
44 yield stream
45
46 __plugin__ = Euronews
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py
--- a/src/streamlink/plugins/euronews.py
+++ b/src/streamlink/plugins/euronews.py
@@ -1,46 +1,77 @@
import re
-from itertools import chain
-
-from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
+from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, HTTPStream
-from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer
-
-_url_re = re.compile("http(s)?://(\w+\.)?euronews.com")
-
class Euronews(Plugin):
- @classmethod
- def can_handle_url(self, url):
- return _url_re.match(url)
+ _url_re = re.compile("http(?:s)?://(\w+)\.?euronews.com/(live|.*)")
+ _re_vod = re.compile(r'<meta\s+property="og:video"\s+content="(http.*?)"\s*/>')
+ _live_api_url = "http://fr.euronews.com/api/watchlive.json"
+ _live_schema = validate.Schema({
+ u"url": validate.url()
+ })
+ _stream_api_schema = validate.Schema({
+ u'status': u'ok',
+ u'primary': {
+ validate.text: {
+ validate.optional(u'hls'): validate.url(),
+ validate.optional(u'rtsp'): validate.url(scheme="rtsp")
+ }
+ },
+ validate.optional(u'backup'): {
+ validate.text: {
+ validate.optional(u'hls'): validate.url(),
+ validate.optional(u'rtsp'): validate.url(scheme="rtsp")
+ }
+ }
+ })
- def _create_stream(self, source):
- url = source["file"]
+ @classmethod
+ def can_handle_url(cls, url):
+ return cls._url_re.match(url)
- if urlparse(url).path.endswith("m3u8"):
- streams = HLSStream.parse_variant_playlist(self.session, url)
+ def _get_vod_stream(self):
+ """
+ Find the VOD video url
+ :return: video url
+ """
+ res = http.get(self.url)
+ video_urls = self._re_vod.findall(res.text)
+ if len(video_urls):
+ return dict(vod=HTTPStream(self.session, video_urls[0]))
- # TODO: Replace with "yield from" when dropping Python 2.
- for stream in streams.items():
- yield stream
- else:
- name = source.get("label", "vod")
- yield name, HTTPStream(self.session, url)
+ def _get_live_streams(self, language):
+ """
+ Get the live stream in a particular language
+ :param language:
+ :return:
+ """
+ res = http.get(self._live_api_url)
+ live_res = http.json(res, schema=self._live_schema)
+ api_res = http.get(live_res[u"url"])
+ stream_data = http.json(api_res, schema=self._stream_api_schema)
+ # find the stream in the requested language
+ if language in stream_data[u'primary']:
+ playlist_url = stream_data[u'primary'][language][u"hls"]
+ return HLSStream.parse_variant_playlist(self.session, playlist_url)
def _get_streams(self):
- res = http.get(self.url)
- playlist = jwplayer.parse_playlist(res)
- if not playlist:
- return
+ """
+ Find the streams for euronews
+ :return:
+ """
+ match = self._url_re.match(self.url)
+ language, path = match.groups()
- for item in playlist:
- streams = map(self._create_stream, item["sources"])
+ # remap domain to language (default to english)
+ language = {"www": "en", "": "en", "arabic": "ar"}.get(language, language)
- # TODO: Replace with "yield from" when dropping Python 2.
- for stream in chain.from_iterable(streams):
- yield stream
+ if path == "live":
+ return self._get_live_streams(language)
+ else:
+ return self._get_vod_stream()
__plugin__ = Euronews
| {"golden_diff": "diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py\n--- a/src/streamlink/plugins/euronews.py\n+++ b/src/streamlink/plugins/euronews.py\n@@ -1,46 +1,77 @@\n import re\n \n-from itertools import chain\n-\n-from streamlink.compat import urlparse\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream, HTTPStream\n \n-from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer\n-\n-_url_re = re.compile(\"http(s)?://(\\w+\\.)?euronews.com\")\n-\n \n class Euronews(Plugin):\n- @classmethod\n- def can_handle_url(self, url):\n- return _url_re.match(url)\n+ _url_re = re.compile(\"http(?:s)?://(\\w+)\\.?euronews.com/(live|.*)\")\n+ _re_vod = re.compile(r'<meta\\s+property=\"og:video\"\\s+content=\"(http.*?)\"\\s*/>')\n+ _live_api_url = \"http://fr.euronews.com/api/watchlive.json\"\n+ _live_schema = validate.Schema({\n+ u\"url\": validate.url()\n+ })\n+ _stream_api_schema = validate.Schema({\n+ u'status': u'ok',\n+ u'primary': {\n+ validate.text: {\n+ validate.optional(u'hls'): validate.url(),\n+ validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n+ }\n+ },\n+ validate.optional(u'backup'): {\n+ validate.text: {\n+ validate.optional(u'hls'): validate.url(),\n+ validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n+ }\n+ }\n+ })\n \n- def _create_stream(self, source):\n- url = source[\"file\"]\n+ @classmethod\n+ def can_handle_url(cls, url):\n+ return cls._url_re.match(url)\n \n- if urlparse(url).path.endswith(\"m3u8\"):\n- streams = HLSStream.parse_variant_playlist(self.session, url)\n+ def _get_vod_stream(self):\n+ \"\"\"\n+ Find the VOD video url\n+ :return: video url\n+ \"\"\"\n+ res = http.get(self.url)\n+ video_urls = self._re_vod.findall(res.text)\n+ if len(video_urls):\n+ return dict(vod=HTTPStream(self.session, video_urls[0]))\n \n- # TODO: Replace with \"yield from\" when dropping Python 2.\n- for stream in streams.items():\n- yield stream\n- else:\n- name = source.get(\"label\", \"vod\")\n- yield name, HTTPStream(self.session, url)\n+ def _get_live_streams(self, language):\n+ \"\"\"\n+ Get the live stream in a particular language\n+ :param language:\n+ :return:\n+ \"\"\"\n+ res = http.get(self._live_api_url)\n+ live_res = http.json(res, schema=self._live_schema)\n+ api_res = http.get(live_res[u\"url\"])\n+ stream_data = http.json(api_res, schema=self._stream_api_schema)\n+ # find the stream in the requested language\n+ if language in stream_data[u'primary']:\n+ playlist_url = stream_data[u'primary'][language][u\"hls\"]\n+ return HLSStream.parse_variant_playlist(self.session, playlist_url)\n \n def _get_streams(self):\n- res = http.get(self.url)\n- playlist = jwplayer.parse_playlist(res)\n- if not playlist:\n- return\n+ \"\"\"\n+ Find the streams for euronews\n+ :return:\n+ \"\"\"\n+ match = self._url_re.match(self.url)\n+ language, path = match.groups()\n \n- for item in playlist:\n- streams = map(self._create_stream, item[\"sources\"])\n+ # remap domain to language (default to english)\n+ language = {\"www\": \"en\", \"\": \"en\", \"arabic\": \"ar\"}.get(language, language)\n \n- # TODO: Replace with \"yield from\" when dropping Python 2.\n- for stream in chain.from_iterable(streams):\n- yield stream\n+ if path == \"live\":\n+ return self._get_live_streams(language)\n+ else:\n+ return self._get_vod_stream()\n \n __plugin__ = Euronews\n", "issue": "Euronews plugin broken\nI dig up EuroNews plugin which 
is broken since December 2014.\r\n\r\nhttps://github.com/chrippa/livestreamer/issues/626\n", "before_files": [{"content": "import re\n\nfrom itertools import chain\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream, HTTPStream\n\nfrom streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer\n\n_url_re = re.compile(\"http(s)?://(\\w+\\.)?euronews.com\")\n\n\nclass Euronews(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _create_stream(self, source):\n url = source[\"file\"]\n\n if urlparse(url).path.endswith(\"m3u8\"):\n streams = HLSStream.parse_variant_playlist(self.session, url)\n\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in streams.items():\n yield stream\n else:\n name = source.get(\"label\", \"vod\")\n yield name, HTTPStream(self.session, url)\n\n def _get_streams(self):\n res = http.get(self.url)\n playlist = jwplayer.parse_playlist(res)\n if not playlist:\n return\n\n for item in playlist:\n streams = map(self._create_stream, item[\"sources\"])\n\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in chain.from_iterable(streams):\n yield stream\n\n__plugin__ = Euronews\n", "path": "src/streamlink/plugins/euronews.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream, HTTPStream\n\n\nclass Euronews(Plugin):\n _url_re = re.compile(\"http(?:s)?://(\\w+)\\.?euronews.com/(live|.*)\")\n _re_vod = re.compile(r'<meta\\s+property=\"og:video\"\\s+content=\"(http.*?)\"\\s*/>')\n _live_api_url = \"http://fr.euronews.com/api/watchlive.json\"\n _live_schema = validate.Schema({\n u\"url\": validate.url()\n })\n _stream_api_schema = validate.Schema({\n u'status': u'ok',\n u'primary': {\n validate.text: {\n validate.optional(u'hls'): validate.url(),\n validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n }\n },\n validate.optional(u'backup'): {\n validate.text: {\n validate.optional(u'hls'): validate.url(),\n validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n }\n }\n })\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url)\n\n def _get_vod_stream(self):\n \"\"\"\n Find the VOD video url\n :return: video url\n \"\"\"\n res = http.get(self.url)\n video_urls = self._re_vod.findall(res.text)\n if len(video_urls):\n return dict(vod=HTTPStream(self.session, video_urls[0]))\n\n def _get_live_streams(self, language):\n \"\"\"\n Get the live stream in a particular language\n :param language:\n :return:\n \"\"\"\n res = http.get(self._live_api_url)\n live_res = http.json(res, schema=self._live_schema)\n api_res = http.get(live_res[u\"url\"])\n stream_data = http.json(api_res, schema=self._stream_api_schema)\n # find the stream in the requested language\n if language in stream_data[u'primary']:\n playlist_url = stream_data[u'primary'][language][u\"hls\"]\n return HLSStream.parse_variant_playlist(self.session, playlist_url)\n\n def _get_streams(self):\n \"\"\"\n Find the streams for euronews\n :return:\n \"\"\"\n match = self._url_re.match(self.url)\n language, path = match.groups()\n\n # remap domain to language (default to english)\n language = {\"www\": \"en\", \"\": \"en\", \"arabic\": \"ar\"}.get(language, language)\n\n if path == \"live\":\n return self._get_live_streams(language)\n 
else:\n return self._get_vod_stream()\n\n__plugin__ = Euronews\n", "path": "src/streamlink/plugins/euronews.py"}]} | 694 | 981 |
gh_patches_debug_3261 | rasdani/github-patches | git_diff | Kinto__kinto-476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when generating a configuration file without a subfolder via the CLI.
```
$ kinto --ini kinto.ini
Traceback (most recent call last):
File "~/.virtualenvs/kinto/bin/kinto", line 9, in <module>
load_entry_point('kinto', 'console_scripts', 'kinto')()
File "~/mozilla/kinto/kinto/__main__.py", line 72, in main
init(config_file, backend)
File "~/mozilla/kinto/kinto/config/__init__.py", line 50, in init
render_template("kinto.tpl", config_file, **values)
File "~/mozilla/kinto/kinto/config/__init__.py", line 14, in render_template
os.makedirs(folder)
File "~/.virtualenvs/kinto/lib/python2.7/os.py", line 157, in makedirs
mkdir(name, mode)
OSError: [Errno 2] No such file or directory: ''
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/config/__init__.py`
Content:
```
1 import os
2 import codecs
3
4 from cliquet import utils as cliquet_utils
5
6 from kinto import logger
7
8 HERE = os.path.abspath(os.path.dirname(__file__))
9
10
11 def render_template(template, destination, **kwargs):
12 template = os.path.join(HERE, template)
13 folder = os.path.dirname(destination)
14
15 if not os.path.exists(folder):
16 os.makedirs(folder)
17
18 logger.info("Created config {}".format(os.path.abspath(destination)))
19
20 with codecs.open(template, 'r', encoding='utf-8') as f:
21 raw_template = f.read()
22 rendered = raw_template.format(**kwargs)
23 with codecs.open(destination, 'w+', encoding='utf-8') as output:
24 output.write(rendered)
25
26
27 def init(config_file, backend):
28 values = {}
29
30 values['secret'] = cliquet_utils.random_bytes_hex(32)
31
32 values['storage_backend'] = "cliquet.storage.%s" % backend
33 values['cache_backend'] = "cliquet.cache.%s" % backend
34 values['permission_backend'] = "cliquet.permission.%s" % backend
35
36 if backend == 'postgresql':
37 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
38 values['storage_url'] = postgresql_url
39 values['cache_url'] = postgresql_url
40 values['permission_url'] = postgresql_url
41
42 elif backend == 'redis':
43 redis_url = "redis://localhost:6379"
44 values['storage_url'] = redis_url + "/1"
45 values['cache_url'] = redis_url + "/2"
46 values['permission_url'] = redis_url + "/3"
47
48 else:
49 values['storage_url'] = ''
50 values['cache_url'] = ''
51 values['permission_url'] = ''
52
53 render_template("kinto.tpl", config_file, **values)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -12,7 +12,7 @@
template = os.path.join(HERE, template)
folder = os.path.dirname(destination)
- if not os.path.exists(folder):
+ if folder and not os.path.exists(folder):
os.makedirs(folder)
logger.info("Created config {}".format(os.path.abspath(destination)))
| {"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -12,7 +12,7 @@\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n \n- if not os.path.exists(folder):\n+ if folder and not os.path.exists(folder):\n os.makedirs(folder)\n \n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n", "issue": "Error while trying to generate a configuration file without subfolder with CLI.\n```\n$ kinto --ini kinto.ini\n\nTraceback (most recent call last):\n File \"~/.virtualenvs/kinto/bin/kinto\", line 9, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"~/mozilla/kinto/kinto/__main__.py\", line 72, in main\n init(config_file, backend)\n File \"~/mozilla/kinto/kinto/config/__init__.py\", line 50, in init\n render_template(\"kinto.tpl\", config_file, **values)\n File \"~/mozilla/kinto/kinto/config/__init__.py\", line 14, in render_template\n os.makedirs(folder)\n File \"~/.virtualenvs/kinto/lib/python2.7/os.py\", line 157, in makedirs\n mkdir(name, mode)\nOSError: [Errno 2] No such file or directory: ''\n```\n\n", "before_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}], "after_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n 
values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]} | 974 | 111 |
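
The one-word guard in this record's diff (`folder and ...`) exists because `os.path.dirname("kinto.ini")` returns an empty string, and `os.makedirs("")` raises `OSError` (a `FileNotFoundError` on Python 3), which is exactly the traceback in the issue. A self-contained reproduction, with `ensure_parent` as a hypothetical helper:

```python
# Reproduce the bug and the guard from the patch in isolation.

import os

def ensure_parent(destination):
    folder = os.path.dirname(destination)
    if folder and not os.path.exists(folder):  # the added `folder and`
        os.makedirs(folder)

if __name__ == "__main__":
    try:
        os.makedirs(os.path.dirname("kinto.ini"))  # os.makedirs("")
    except OSError as exc:
        print("unguarded call fails:", exc)
    ensure_parent("kinto.ini")  # no-op for a bare filename, no error
    print("guarded call is a no-op for bare filenames")
```
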
gh_patches_debug_26861 | rasdani/github-patches | git_diff | pyca__cryptography-1462 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove `pragma: no cover` markers from Windows-specific code
See #502
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/bindings/openssl/binding.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import os
17 import sys
18 import threading
19
20 from cryptography.hazmat.bindings.utils import build_ffi_for_binding
21
22
23 _OSX_PRE_INCLUDE = """
24 #ifdef __APPLE__
25 #include <AvailabilityMacros.h>
26 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
27 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
28 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
29 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
30 #endif
31 """
32
33 _OSX_POST_INCLUDE = """
34 #ifdef __APPLE__
35 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
36 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
37 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
38 #endif
39 """
40
41
42 class Binding(object):
43 """
44 OpenSSL API wrapper.
45 """
46 _module_prefix = "cryptography.hazmat.bindings.openssl."
47 _modules = [
48 "aes",
49 "asn1",
50 "bignum",
51 "bio",
52 "cmac",
53 "cms",
54 "conf",
55 "crypto",
56 "dh",
57 "dsa",
58 "ec",
59 "ecdh",
60 "ecdsa",
61 "engine",
62 "err",
63 "evp",
64 "hmac",
65 "nid",
66 "objects",
67 "opensslv",
68 "osrandom_engine",
69 "pem",
70 "pkcs7",
71 "pkcs12",
72 "rand",
73 "rsa",
74 "ssl",
75 "x509",
76 "x509name",
77 "x509v3",
78 "x509_vfy"
79 ]
80
81 _locks = None
82 _lock_cb_handle = None
83 _lock_init_lock = threading.Lock()
84
85 ffi = None
86 lib = None
87
88 def __init__(self):
89 self._ensure_ffi_initialized()
90
91 @classmethod
92 def _ensure_ffi_initialized(cls):
93 if cls.ffi is not None and cls.lib is not None:
94 return
95
96 # OpenSSL goes by a different library name on different operating
97 # systems.
98 if sys.platform != "win32":
99 # In some circumstances, the order in which these libs are
100 # specified on the linker command-line is significant;
101 # libssl must come before libcrypto
102 # (http://marc.info/?l=openssl-users&m=135361825921871)
103 libraries = ["ssl", "crypto"]
104 else: # pragma: no cover
105 link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
106 libraries = _get_windows_libraries(link_type)
107
108 cls.ffi, cls.lib = build_ffi_for_binding(
109 module_prefix=cls._module_prefix,
110 modules=cls._modules,
111 pre_include=_OSX_PRE_INCLUDE,
112 post_include=_OSX_POST_INCLUDE,
113 libraries=libraries,
114 )
115 res = cls.lib.Cryptography_add_osrandom_engine()
116 assert res != 0
117
118 @classmethod
119 def init_static_locks(cls):
120 with cls._lock_init_lock:
121 cls._ensure_ffi_initialized()
122
123 if not cls._lock_cb_handle:
124 cls._lock_cb_handle = cls.ffi.callback(
125 "void(int, int, const char *, int)",
126 cls._lock_cb
127 )
128
129 # Use Python's implementation if available, importing _ssl triggers
130 # the setup for this.
131 __import__("_ssl")
132
133 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
134 return
135
136 # If nothing else has setup a locking callback already, we set up
137 # our own
138 num_locks = cls.lib.CRYPTO_num_locks()
139 cls._locks = [threading.Lock() for n in range(num_locks)]
140
141 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
142
143 @classmethod
144 def _lock_cb(cls, mode, n, file, line):
145 lock = cls._locks[n]
146
147 if mode & cls.lib.CRYPTO_LOCK:
148 lock.acquire()
149 elif mode & cls.lib.CRYPTO_UNLOCK:
150 lock.release()
151 else:
152 raise RuntimeError(
153 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format(
154 mode, n, file, line
155 )
156 )
157
158
159 def _get_windows_libraries(link_type):
160 if link_type == "dynamic":
161 return ["libeay32", "ssleay32", "advapi32"]
162 elif link_type == "static" or link_type == "":
163 return ["libeay32mt", "ssleay32mt", "advapi32",
164 "crypt32", "gdi32", "user32", "ws2_32"]
165 else:
166 raise ValueError(
167 "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'"
168 )
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py
--- a/cryptography/hazmat/bindings/openssl/binding.py
+++ b/cryptography/hazmat/bindings/openssl/binding.py
@@ -95,15 +95,7 @@
# OpenSSL goes by a different library name on different operating
# systems.
- if sys.platform != "win32":
- # In some circumstances, the order in which these libs are
- # specified on the linker command-line is significant;
- # libssl must come before libcrypto
- # (http://marc.info/?l=openssl-users&m=135361825921871)
- libraries = ["ssl", "crypto"]
- else: # pragma: no cover
- link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
- libraries = _get_windows_libraries(link_type)
+ libraries = _get_libraries(sys.platform)
cls.ffi, cls.lib = build_ffi_for_binding(
module_prefix=cls._module_prefix,
@@ -156,6 +148,18 @@
)
+def _get_libraries(platform):
+ if platform != "win32":
+ # In some circumstances, the order in which these libs are
+ # specified on the linker command-line is significant;
+ # libssl must come before libcrypto
+ # (http://marc.info/?l=openssl-users&m=135361825921871)
+ return ["ssl", "crypto"]
+ else:
+ link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
+ return _get_windows_libraries(link_type)
+
+
def _get_windows_libraries(link_type):
if link_type == "dynamic":
return ["libeay32", "ssleay32", "advapi32"]
| {"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py\n--- a/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/cryptography/hazmat/bindings/openssl/binding.py\n@@ -95,15 +95,7 @@\n \n # OpenSSL goes by a different library name on different operating\n # systems.\n- if sys.platform != \"win32\":\n- # In some circumstances, the order in which these libs are\n- # specified on the linker command-line is significant;\n- # libssl must come before libcrypto\n- # (http://marc.info/?l=openssl-users&m=135361825921871)\n- libraries = [\"ssl\", \"crypto\"]\n- else: # pragma: no cover\n- link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n- libraries = _get_windows_libraries(link_type)\n+ libraries = _get_libraries(sys.platform)\n \n cls.ffi, cls.lib = build_ffi_for_binding(\n module_prefix=cls._module_prefix,\n@@ -156,6 +148,18 @@\n )\n \n \n+def _get_libraries(platform):\n+ if platform != \"win32\":\n+ # In some circumstances, the order in which these libs are\n+ # specified on the linker command-line is significant;\n+ # libssl must come before libcrypto\n+ # (http://marc.info/?l=openssl-users&m=135361825921871)\n+ return [\"ssl\", \"crypto\"]\n+ else:\n+ link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n+ return _get_windows_libraries(link_type)\n+\n+\n def _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n", "issue": "Remove pragma nocovers from Windows specific code\nSee #502 \n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport threading\n\nfrom cryptography.hazmat.bindings.utils import build_ffi_for_binding\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ]\n\n _locks = None\n _lock_cb_handle = None\n _lock_init_lock = threading.Lock()\n\n ffi = None\n lib = 
None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n # OpenSSL goes by a different library name on different operating\n # systems.\n if sys.platform != \"win32\":\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n libraries = [\"ssl\", \"crypto\"]\n else: # pragma: no cover\n link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n libraries = _get_windows_libraries(link_type)\n\n cls.ffi, cls.lib = build_ffi_for_binding(\n module_prefix=cls._module_prefix,\n modules=cls._modules,\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=libraries,\n )\n res = cls.lib.Cryptography_add_osrandom_engine()\n assert res != 0\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n\n\ndef _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n elif link_type == \"static\" or link_type == \"\":\n return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n else:\n raise ValueError(\n \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n )\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport threading\n\nfrom cryptography.hazmat.bindings.utils import build_ffi_for_binding\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef 
DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ]\n\n _locks = None\n _lock_cb_handle = None\n _lock_init_lock = threading.Lock()\n\n ffi = None\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n # OpenSSL goes by a different library name on different operating\n # systems.\n libraries = _get_libraries(sys.platform)\n\n cls.ffi, cls.lib = build_ffi_for_binding(\n module_prefix=cls._module_prefix,\n modules=cls._modules,\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=libraries,\n )\n res = cls.lib.Cryptography_add_osrandom_engine()\n assert res != 0\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n\n\ndef _get_libraries(platform):\n if platform != \"win32\":\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n return [\"ssl\", \"crypto\"]\n else:\n link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n return _get_windows_libraries(link_type)\n\n\ndef _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n elif link_type == \"static\" or link_type == \"\":\n return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n else:\n raise ValueError(\n \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n )\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}]} | 1,922 | 447 |
gh_patches_debug_2172 | rasdani/github-patches | git_diff | liqd__a4-opin-1799 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing the Organisation Details is not possible
**URL:**
https://opin.me/en/dashboard/organisations/liquid-democracy/settings/
**user:**
Initiators who try to fill in the organisation details, and admins as well.
**expected behaviour:**
If I fill in the organisation details and save them, they are stored.
**behaviour:**
I fill in the organisation details and press save; the page reloads, but the changes are not saved.
**important screensize:**
**device & browser:**
Firefox 73.0.1 (64-Bit)
**Comment/Question:**
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/dashboard/forms.py`
Content:
```
1
2 import parler
3 from django import forms
4 from django.conf import settings
5 from django.core.exceptions import ValidationError
6 from django.utils.translation import ugettext_lazy as _
7
8 from euth.organisations.models import Organisation
9
10
11 class OrganisationForm(forms.ModelForm):
12 translated_fields = [
13 ('description_why', forms.CharField, {
14 'label': _('description why'),
15 'widget': forms.Textarea,
16 }),
17 ('description_how', forms.CharField, {
18 'widget': forms.Textarea,
19 'label': _('description how')
20 }),
21 ('description', forms.CharField, {
22 'label': _('description'),
23 'help_text': _(
24 'More info about the organisation / '
25 'Short text for organisation overview'),
26 'widget': forms.Textarea,
27 })
28 ]
29 languages = [lang_code for lang_code, lang in settings.LANGUAGES]
30
31 class Meta:
32 model = Organisation
33 fields = [
34 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',
35 'instagram_handle', 'webpage', 'country', 'place'
36 ]
37 help_texts = {
38 'name': _('The title of your organisation'),
39 }
40
41 def _get_identifier(self, language, fieldname):
42 return '{}__{}'.format(language, fieldname)
43
44 def __init__(self, *args, **kwargs):
45 super().__init__(*args, **kwargs)
46
47 # inject additional form fields for translated model fields
48 for lang_code in self.languages:
49 for name, field_cls, kwargs in self.translated_fields:
50 self.instance.set_current_language(lang_code)
51 field = field_cls(**kwargs)
52 identifier = self._get_identifier(
53 lang_code, name)
54 field.required = False
55
56 try:
57 translation = self.instance.get_translation(lang_code)
58 initial = getattr(translation, name)
59 except parler.models.TranslationDoesNotExist:
60 initial = ''
61
62 field.initial = initial
63 self.fields[identifier] = field
64
65 def translated(self):
66 """
67 Return translated fields as list of tuples (language code, fields).
68 """
69
70 from itertools import groupby
71 fields = [(field.html_name.split('__')[0], field) for field in self
72 if '__' in field.html_name]
73 groups = groupby(fields, lambda x: x[0])
74 values = [(lang, list(map(lambda x: x[1], group)))
75 for lang, group in groups]
76 return values
77
78 def untranslated(self):
79 """
80 Return untranslated fields as flat list.
81 """
82 return [field for field in self if '__' not in field.html_name]
83
84 def prefiled_languages(self):
85 """
86 Return languages tabs that need to be displayed.
87 """
88 languages = [lang for lang in self.languages
89 if lang in self.data
90 or self.instance.has_translation(lang)]
91 # always provide english
92 if 'en' not in languages:
93 languages.insert(0, 'en')
94 return languages
95
96 def save(self, commit=True):
97 instance = super().save(commit=commit)
98 if commit is True:
99 for lang_code in self.languages:
100 if lang_code in self.data:
101 instance.set_current_language(lang_code)
102 for fieldname, _cls, _kwargs in self.translated_fields:
103 identifier = '{}__{}'.format(lang_code, fieldname)
104 setattr(instance, fieldname,
105 self.cleaned_data.get(identifier))
106 instance.save()
107 elif instance.has_translation(lang_code):
108 instance.delete_translation(lang_code)
109 return instance
110
111 def clean(self):
112 for lang_code in self.languages:
113 if lang_code in self.data:
114 for fieldname in self.translated_fields:
115 identifier = self._get_identifier(lang_code, fieldname[0])
116 data = self.cleaned_data
117 if identifier not in data or not data[identifier]:
118 msg = 'This field is required'
119 raise ValidationError((identifier, msg))
120
121 return self.cleaned_data
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/dashboard/forms.py b/euth/dashboard/forms.py
--- a/euth/dashboard/forms.py
+++ b/euth/dashboard/forms.py
@@ -81,7 +81,7 @@
"""
return [field for field in self if '__' not in field.html_name]
- def prefiled_languages(self):
+ def prefilled_languages(self):
"""
Return languages tabs that need to be displayed.
"""
| {"golden_diff": "diff --git a/euth/dashboard/forms.py b/euth/dashboard/forms.py\n--- a/euth/dashboard/forms.py\n+++ b/euth/dashboard/forms.py\n@@ -81,7 +81,7 @@\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n \n- def prefiled_languages(self):\n+ def prefilled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n", "issue": "Changing the Organisation Details is not possible\n**URL:** \r\nhttps://opin.me/en/dashboard/organisations/liquid-democracy/settings/\r\n**user:** \r\nInitiators, who try to fill in the Organisations details & as an admin too.\r\n**expected behaviour:** \r\nIf I fill in Organisation details, save them and it is there\r\n**behaviour:** \r\nI fill in the Organisation details, press save and it reloads, but do not save.\r\n**important screensize:**\r\n\r\n**device & browser:** \r\nFirefox 73.0.1 (64-Bit)\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "\nimport parler\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.organisations.models import Organisation\n\n\nclass OrganisationForm(forms.ModelForm):\n translated_fields = [\n ('description_why', forms.CharField, {\n 'label': _('description why'),\n 'widget': forms.Textarea,\n }),\n ('description_how', forms.CharField, {\n 'widget': forms.Textarea,\n 'label': _('description how')\n }),\n ('description', forms.CharField, {\n 'label': _('description'),\n 'help_text': _(\n 'More info about the organisation / '\n 'Short text for organisation overview'),\n 'widget': forms.Textarea,\n })\n ]\n languages = [lang_code for lang_code, lang in settings.LANGUAGES]\n\n class Meta:\n model = Organisation\n fields = [\n 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',\n 'instagram_handle', 'webpage', 'country', 'place'\n ]\n help_texts = {\n 'name': _('The title of your organisation'),\n }\n\n def _get_identifier(self, language, fieldname):\n return '{}__{}'.format(language, fieldname)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # inject additional form fields for translated model fields\n for lang_code in self.languages:\n for name, field_cls, kwargs in self.translated_fields:\n self.instance.set_current_language(lang_code)\n field = field_cls(**kwargs)\n identifier = self._get_identifier(\n lang_code, name)\n field.required = False\n\n try:\n translation = self.instance.get_translation(lang_code)\n initial = getattr(translation, name)\n except parler.models.TranslationDoesNotExist:\n initial = ''\n\n field.initial = initial\n self.fields[identifier] = field\n\n def translated(self):\n \"\"\"\n Return translated fields as list of tuples (language code, fields).\n \"\"\"\n\n from itertools import groupby\n fields = [(field.html_name.split('__')[0], field) for field in self\n if '__' in field.html_name]\n groups = groupby(fields, lambda x: x[0])\n values = [(lang, list(map(lambda x: x[1], group)))\n for lang, group in groups]\n return values\n\n def untranslated(self):\n \"\"\"\n Return untranslated fields as flat list.\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n\n def prefiled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n languages = [lang for lang in self.languages\n if lang in self.data\n or self.instance.has_translation(lang)]\n # always provide english\n if 'en' not in languages:\n 
languages.insert(0, 'en')\n return languages\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n if commit is True:\n for lang_code in self.languages:\n if lang_code in self.data:\n instance.set_current_language(lang_code)\n for fieldname, _cls, _kwargs in self.translated_fields:\n identifier = '{}__{}'.format(lang_code, fieldname)\n setattr(instance, fieldname,\n self.cleaned_data.get(identifier))\n instance.save()\n elif instance.has_translation(lang_code):\n instance.delete_translation(lang_code)\n return instance\n\n def clean(self):\n for lang_code in self.languages:\n if lang_code in self.data:\n for fieldname in self.translated_fields:\n identifier = self._get_identifier(lang_code, fieldname[0])\n data = self.cleaned_data\n if identifier not in data or not data[identifier]:\n msg = 'This field is required'\n raise ValidationError((identifier, msg))\n\n return self.cleaned_data\n", "path": "euth/dashboard/forms.py"}], "after_files": [{"content": "\nimport parler\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.organisations.models import Organisation\n\n\nclass OrganisationForm(forms.ModelForm):\n translated_fields = [\n ('description_why', forms.CharField, {\n 'label': _('description why'),\n 'widget': forms.Textarea,\n }),\n ('description_how', forms.CharField, {\n 'widget': forms.Textarea,\n 'label': _('description how')\n }),\n ('description', forms.CharField, {\n 'label': _('description'),\n 'help_text': _(\n 'More info about the organisation / '\n 'Short text for organisation overview'),\n 'widget': forms.Textarea,\n })\n ]\n languages = [lang_code for lang_code, lang in settings.LANGUAGES]\n\n class Meta:\n model = Organisation\n fields = [\n 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',\n 'instagram_handle', 'webpage', 'country', 'place'\n ]\n help_texts = {\n 'name': _('The title of your organisation'),\n }\n\n def _get_identifier(self, language, fieldname):\n return '{}__{}'.format(language, fieldname)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # inject additional form fields for translated model fields\n for lang_code in self.languages:\n for name, field_cls, kwargs in self.translated_fields:\n self.instance.set_current_language(lang_code)\n field = field_cls(**kwargs)\n identifier = self._get_identifier(\n lang_code, name)\n field.required = False\n\n try:\n translation = self.instance.get_translation(lang_code)\n initial = getattr(translation, name)\n except parler.models.TranslationDoesNotExist:\n initial = ''\n\n field.initial = initial\n self.fields[identifier] = field\n\n def translated(self):\n \"\"\"\n Return translated fields as list of tuples (language code, fields).\n \"\"\"\n\n from itertools import groupby\n fields = [(field.html_name.split('__')[0], field) for field in self\n if '__' in field.html_name]\n groups = groupby(fields, lambda x: x[0])\n values = [(lang, list(map(lambda x: x[1], group)))\n for lang, group in groups]\n return values\n\n def untranslated(self):\n \"\"\"\n Return untranslated fields as flat list.\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n\n def prefilled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n languages = [lang for lang in self.languages\n if lang in self.data\n or self.instance.has_translation(lang)]\n # always provide english\n if 'en' not in 
languages:\n languages.insert(0, 'en')\n return languages\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n if commit is True:\n for lang_code in self.languages:\n if lang_code in self.data:\n instance.set_current_language(lang_code)\n for fieldname, _cls, _kwargs in self.translated_fields:\n identifier = '{}__{}'.format(lang_code, fieldname)\n setattr(instance, fieldname,\n self.cleaned_data.get(identifier))\n instance.save()\n elif instance.has_translation(lang_code):\n instance.delete_translation(lang_code)\n return instance\n\n def clean(self):\n for lang_code in self.languages:\n if lang_code in self.data:\n for fieldname in self.translated_fields:\n identifier = self._get_identifier(lang_code, fieldname[0])\n data = self.cleaned_data\n if identifier not in data or not data[identifier]:\n msg = 'This field is required'\n raise ValidationError((identifier, msg))\n\n return self.cleaned_data\n", "path": "euth/dashboard/forms.py"}]} | 1,483 | 95 |
gh_patches_debug_16640 | rasdani/github-patches | git_diff | PrefectHQ__prefect-9724 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PrefectHTTPStatusError: Client error '429 Too Many Requests' for url
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
While using `prefect` with `prefect-dask` I encountered a rate limit error. This shouldn't be happening, as the Prefect client base should retry on those responses. I'm not sure why this is happening, but it first appeared in `2.10.10` and did not exist before.
### Reproduction
```python3
Any Flow with prefect-dask
```
### Error
```python3
Traceback (most recent call last):
File "/usr/local/lib/python3.11/dist-packages/distributed/client.py", line 1697, in _close
await self.scheduler_comm.close()
asyncio.exceptions.CancelledError
01:00:08.452 | ERROR | Flow run 'psi5-alastria-x' - Crash detected! Execution was interrupted by an unexpected exception: PrefectHTTPStatusError: Client error '429 Too Many Requests' for url 'https://cloud-url/task_runs/'
Response: {'detail': 'Orchestration API rate limit reached'}
For more information check: https://httpstatuses.com/429
```
### Versions
```Text
Version: 2.10.10
API version: 0.8.4
Python version: 3.11.2
Git commit: 8159450b
Built: Thu, May 18, 2023 3:43 PM
OS/Arch: linux/x86_64
Profile: default
Server type: server
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/client/cloud.py`
Content:
```
1 import re
2 from typing import Any, Dict, List, Optional
3
4 import anyio
5 import httpx
6 import pydantic
7 from fastapi import status
8
9 import prefect.context
10 import prefect.settings
11 from prefect.client.schemas import Workspace
12 from prefect.exceptions import PrefectException
13 from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL
14
15
16 def get_cloud_client(
17 host: Optional[str] = None,
18 api_key: Optional[str] = None,
19 httpx_settings: Optional[dict] = None,
20 infer_cloud_url: bool = False,
21 ) -> "CloudClient":
22 """
23 Needs a docstring.
24 """
25 if httpx_settings is not None:
26 httpx_settings = httpx_settings.copy()
27
28 if infer_cloud_url is False:
29 host = host or PREFECT_CLOUD_API_URL.value()
30 else:
31 configured_url = prefect.settings.PREFECT_API_URL.value()
32 host = re.sub(r"accounts/.{36}/workspaces/.{36}\Z", "", configured_url)
33
34 return CloudClient(
35 host=host,
36 api_key=api_key or PREFECT_API_KEY.value(),
37 httpx_settings=httpx_settings,
38 )
39
40
41 class CloudUnauthorizedError(PrefectException):
42 """
43 Raised when the CloudClient receives a 401 or 403 from the Cloud API.
44 """
45
46
47 class CloudClient:
48 def __init__(
49 self,
50 host: str,
51 api_key: str,
52 httpx_settings: dict = None,
53 ) -> None:
54 httpx_settings = httpx_settings or dict()
55 httpx_settings.setdefault("headers", dict())
56 httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
57
58 httpx_settings.setdefault("base_url", host)
59 self._client = httpx.AsyncClient(**httpx_settings)
60
61 async def api_healthcheck(self):
62 """
63 Attempts to connect to the Cloud API and raises the encountered exception if not
64 successful.
65
66 If successful, returns `None`.
67 """
68 with anyio.fail_after(10):
69 await self.read_workspaces()
70
71 async def read_workspaces(self) -> List[Workspace]:
72 return pydantic.parse_obj_as(List[Workspace], await self.get("/me/workspaces"))
73
74 async def read_worker_metadata(self) -> Dict[str, Any]:
75 return await self.get("collections/views/aggregate-worker-metadata")
76
77 async def __aenter__(self):
78 await self._client.__aenter__()
79 return self
80
81 async def __aexit__(self, *exc_info):
82 return await self._client.__aexit__(*exc_info)
83
84 def __enter__(self):
85 raise RuntimeError(
86 "The `CloudClient` must be entered with an async context. Use 'async "
87 "with CloudClient(...)' not 'with CloudClient(...)'"
88 )
89
90 def __exit__(self, *_):
91 assert False, "This should never be called but must be defined for __enter__"
92
93 async def get(self, route, **kwargs):
94 try:
95 res = await self._client.get(route, **kwargs)
96 res.raise_for_status()
97 except httpx.HTTPStatusError as exc:
98 if exc.response.status_code in (
99 status.HTTP_401_UNAUTHORIZED,
100 status.HTTP_403_FORBIDDEN,
101 ):
102 raise CloudUnauthorizedError
103 else:
104 raise exc
105
106 return res.json()
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py
--- a/src/prefect/client/cloud.py
+++ b/src/prefect/client/cloud.py
@@ -8,6 +8,7 @@
import prefect.context
import prefect.settings
+from prefect.client.base import PrefectHttpxClient
from prefect.client.schemas import Workspace
from prefect.exceptions import PrefectException
from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL
@@ -56,7 +57,7 @@
httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
httpx_settings.setdefault("base_url", host)
- self._client = httpx.AsyncClient(**httpx_settings)
+ self._client = PrefectHttpxClient(**httpx_settings)
async def api_healthcheck(self):
"""
| {"golden_diff": "diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py\n--- a/src/prefect/client/cloud.py\n+++ b/src/prefect/client/cloud.py\n@@ -8,6 +8,7 @@\n \n import prefect.context\n import prefect.settings\n+from prefect.client.base import PrefectHttpxClient\n from prefect.client.schemas import Workspace\n from prefect.exceptions import PrefectException\n from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL\n@@ -56,7 +57,7 @@\n httpx_settings[\"headers\"].setdefault(\"Authorization\", f\"Bearer {api_key}\")\n \n httpx_settings.setdefault(\"base_url\", host)\n- self._client = httpx.AsyncClient(**httpx_settings)\n+ self._client = PrefectHttpxClient(**httpx_settings)\n \n async def api_healthcheck(self):\n \"\"\"\n", "issue": "PrefectHTTPStatusError: Client error '429 Too Many Requests' for url\n### First check\r\n\r\n- [X] I added a descriptive title to this issue.\r\n- [X] I used the GitHub search to find a similar issue and didn't find it.\r\n- [X] I searched the Prefect documentation for this issue.\r\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\r\n\r\n### Bug summary\r\n\r\nWhile using `prefect` with `prefect-dask` I encountered a rate limit error. this shouldn't be happening as prefect client base should retry on those. I'm not sure why this is happening but this has risen at `2.10.10` and did not exist before\r\n\r\n### Reproduction\r\n\r\n```python3\r\nAny Flow with prefect-dask\r\n```\r\n\r\n\r\n### Error\r\n\r\n```python3\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/dist-packages/distributed/client.py\", line 1697, in _close\r\n await self.scheduler_comm.close()\r\nasyncio.exceptions.CancelledError\r\n01:00:08.452 | ERROR | Flow run 'psi5-alastria-x' - Crash detected! 
Execution was interrupted by an unexpected exception: PrefectHTTPStatusError: Client error '429 Too Many Requests' for url 'https://cloud-url/task_runs/'\r\nResponse: {'detail': 'Orchestration API rate limit reached'}\r\nFor more information check: https://httpstatuses.com/429\r\n```\r\n\r\n\r\n### Versions\r\n\r\n```Text\r\nVersion: 2.10.10\r\nAPI version: 0.8.4\r\nPython version: 3.11.2\r\nGit commit: 8159450b\r\nBuilt: Thu, May 18, 2023 3:43 PM\r\nOS/Arch: linux/x86_64\r\nProfile: default\r\nServer type: server\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Optional\n\nimport anyio\nimport httpx\nimport pydantic\nfrom fastapi import status\n\nimport prefect.context\nimport prefect.settings\nfrom prefect.client.schemas import Workspace\nfrom prefect.exceptions import PrefectException\nfrom prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL\n\n\ndef get_cloud_client(\n host: Optional[str] = None,\n api_key: Optional[str] = None,\n httpx_settings: Optional[dict] = None,\n infer_cloud_url: bool = False,\n) -> \"CloudClient\":\n \"\"\"\n Needs a docstring.\n \"\"\"\n if httpx_settings is not None:\n httpx_settings = httpx_settings.copy()\n\n if infer_cloud_url is False:\n host = host or PREFECT_CLOUD_API_URL.value()\n else:\n configured_url = prefect.settings.PREFECT_API_URL.value()\n host = re.sub(r\"accounts/.{36}/workspaces/.{36}\\Z\", \"\", configured_url)\n\n return CloudClient(\n host=host,\n api_key=api_key or PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )\n\n\nclass CloudUnauthorizedError(PrefectException):\n \"\"\"\n Raised when the CloudClient receives a 401 or 403 from the Cloud API.\n \"\"\"\n\n\nclass CloudClient:\n def __init__(\n self,\n host: str,\n api_key: str,\n httpx_settings: dict = None,\n ) -> None:\n httpx_settings = httpx_settings or dict()\n httpx_settings.setdefault(\"headers\", dict())\n httpx_settings[\"headers\"].setdefault(\"Authorization\", f\"Bearer {api_key}\")\n\n httpx_settings.setdefault(\"base_url\", host)\n self._client = httpx.AsyncClient(**httpx_settings)\n\n async def api_healthcheck(self):\n \"\"\"\n Attempts to connect to the Cloud API and raises the encountered exception if not\n successful.\n\n If successful, returns `None`.\n \"\"\"\n with anyio.fail_after(10):\n await self.read_workspaces()\n\n async def read_workspaces(self) -> List[Workspace]:\n return pydantic.parse_obj_as(List[Workspace], await self.get(\"/me/workspaces\"))\n\n async def read_worker_metadata(self) -> Dict[str, Any]:\n return await self.get(\"collections/views/aggregate-worker-metadata\")\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *exc_info):\n return await self._client.__aexit__(*exc_info)\n\n def __enter__(self):\n raise RuntimeError(\n \"The `CloudClient` must be entered with an async context. 
Use 'async \"\n \"with CloudClient(...)' not 'with CloudClient(...)'\"\n )\n\n def __exit__(self, *_):\n assert False, \"This should never be called but must be defined for __enter__\"\n\n async def get(self, route, **kwargs):\n try:\n res = await self._client.get(route, **kwargs)\n res.raise_for_status()\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code in (\n status.HTTP_401_UNAUTHORIZED,\n status.HTTP_403_FORBIDDEN,\n ):\n raise CloudUnauthorizedError\n else:\n raise exc\n\n return res.json()\n", "path": "src/prefect/client/cloud.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Optional\n\nimport anyio\nimport httpx\nimport pydantic\nfrom fastapi import status\n\nimport prefect.context\nimport prefect.settings\nfrom prefect.client.base import PrefectHttpxClient\nfrom prefect.client.schemas import Workspace\nfrom prefect.exceptions import PrefectException\nfrom prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL\n\n\ndef get_cloud_client(\n host: Optional[str] = None,\n api_key: Optional[str] = None,\n httpx_settings: Optional[dict] = None,\n infer_cloud_url: bool = False,\n) -> \"CloudClient\":\n \"\"\"\n Needs a docstring.\n \"\"\"\n if httpx_settings is not None:\n httpx_settings = httpx_settings.copy()\n\n if infer_cloud_url is False:\n host = host or PREFECT_CLOUD_API_URL.value()\n else:\n configured_url = prefect.settings.PREFECT_API_URL.value()\n host = re.sub(r\"accounts/.{36}/workspaces/.{36}\\Z\", \"\", configured_url)\n\n return CloudClient(\n host=host,\n api_key=api_key or PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )\n\n\nclass CloudUnauthorizedError(PrefectException):\n \"\"\"\n Raised when the CloudClient receives a 401 or 403 from the Cloud API.\n \"\"\"\n\n\nclass CloudClient:\n def __init__(\n self,\n host: str,\n api_key: str,\n httpx_settings: dict = None,\n ) -> None:\n httpx_settings = httpx_settings or dict()\n httpx_settings.setdefault(\"headers\", dict())\n httpx_settings[\"headers\"].setdefault(\"Authorization\", f\"Bearer {api_key}\")\n\n httpx_settings.setdefault(\"base_url\", host)\n self._client = PrefectHttpxClient(**httpx_settings)\n\n async def api_healthcheck(self):\n \"\"\"\n Attempts to connect to the Cloud API and raises the encountered exception if not\n successful.\n\n If successful, returns `None`.\n \"\"\"\n with anyio.fail_after(10):\n await self.read_workspaces()\n\n async def read_workspaces(self) -> List[Workspace]:\n return pydantic.parse_obj_as(List[Workspace], await self.get(\"/me/workspaces\"))\n\n async def read_worker_metadata(self) -> Dict[str, Any]:\n return await self.get(\"collections/views/aggregate-worker-metadata\")\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *exc_info):\n return await self._client.__aexit__(*exc_info)\n\n def __enter__(self):\n raise RuntimeError(\n \"The `CloudClient` must be entered with an async context. 
Use 'async \"\n \"with CloudClient(...)' not 'with CloudClient(...)'\"\n )\n\n def __exit__(self, *_):\n assert False, \"This should never be called but must be defined for __enter__\"\n\n async def get(self, route, **kwargs):\n try:\n res = await self._client.get(route, **kwargs)\n res.raise_for_status()\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code in (\n status.HTTP_401_UNAUTHORIZED,\n status.HTTP_403_FORBIDDEN,\n ):\n raise CloudUnauthorizedError\n else:\n raise exc\n\n return res.json()\n", "path": "src/prefect/client/cloud.py"}]} | 1,651 | 188 |
gh_patches_debug_11073 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Re-generate all bundles with Distribution changes after Timestamp
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/management/commands/generate_bundles.py`
Content:
```
1 import os
2 import json
3 import itertools
4 from datetime import datetime
5
6 import brotli
7 from product_details import product_details
8
9 from django.conf import settings
10 from django.core.files.base import ContentFile
11 from django.core.management.base import BaseCommand
12 from django.db.models import Q
13 from django.core.files.storage import default_storage
14
15 from snippets.base.models import DistributionBundle, Job
16
17
18 class Command(BaseCommand):
19 args = '(no args)'
20 help = 'Generate bundles'
21
22 def add_arguments(self, parser):
23 # Named (optional) arguments
24 parser.add_argument(
25 '--timestamp',
26 help='Parse Jobs last modified after <timestamp>',
27 )
28
29 def handle(self, *args, **options):
30 if not options['timestamp']:
31 self.stdout.write('Generating all bundles.')
32 total_jobs = Job.objects.all()
33 else:
34 self.stdout.write(
35 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])
36 )
37 total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])
38
39 if not total_jobs:
40 self.stdout.write('Nothing to do…')
41 return
42
43 self.stdout.write('Processing bundles…')
44
45 combinations_to_process = set(
46 itertools.chain.from_iterable(
47 itertools.product(
48 job.channels,
49 job.snippet.locale.code.strip(',').split(',')
50 )
51 for job in total_jobs
52 )
53 )
54 distribution_bundles_to_process = DistributionBundle.objects.filter(
55 distributions__jobs__in=total_jobs
56 ).distinct().order_by('id')
57
58 for distribution_bundle in distribution_bundles_to_process:
59 distributions = distribution_bundle.distributions.all()
60
61 for channel, locale in combinations_to_process:
62 additional_jobs = []
63 if channel == 'nightly' and settings.NIGHTLY_INCLUDES_RELEASE:
64 additional_jobs = Job.objects.filter(
65 status=Job.PUBLISHED).filter(**{
66 'targets__on_release': True,
67 'distribution__in': distributions,
68 })
69
70 channel_jobs = Job.objects.filter(
71 status=Job.PUBLISHED).filter(
72 Q(**{
73 'targets__on_{}'.format(channel): True,
74 'distribution__in': distributions,
75 }))
76
77 all_jobs = Job.objects.filter(
78 Q(id__in=additional_jobs) | Q(id__in=channel_jobs)
79 )
80
81 locales_to_process = [
82 key.lower() for key in product_details.languages.keys()
83 if key.lower().startswith(locale)
84 ]
85
86 for locale_to_process in locales_to_process:
87 filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(
88 channel=channel,
89 locale=locale_to_process,
90 distribution=distribution_bundle.code_name,
91 )
92 filename = os.path.join(settings.MEDIA_BUNDLES_PREGEN_ROOT, filename)
93 full_locale = ',{},'.format(locale_to_process.lower())
94 splitted_locale = ',{},'.format(locale_to_process.lower().split('-', 1)[0])
95 bundle_jobs = all_jobs.filter(
96 Q(snippet__locale__code__contains=splitted_locale) |
97 Q(snippet__locale__code__contains=full_locale)).distinct()
98
99 # If DistributionBundle is not enabled, or if there are no
100 # Published Jobs for the channel / locale / distribution
101 # combination, delete the current bundle file if it exists.
102 if not distribution_bundle.enabled or not bundle_jobs.exists():
103 if default_storage.exists(filename):
104 self.stdout.write('Removing {}'.format(filename))
105 default_storage.delete(filename)
106 continue
107
108 data = []
109 channel_job_ids = list(channel_jobs.values_list('id', flat=True))
110 for job in bundle_jobs:
111 if job.id in channel_job_ids:
112 render = job.render()
113 else:
114 render = job.render(always_eval_to_false=True)
115 data.append(render)
116
117 bundle_content = json.dumps({
118 'messages': data,
119 'metadata': {
120 'generated_at': datetime.utcnow().isoformat(),
121 'number_of_snippets': len(data),
122 'channel': channel,
123 }
124 })
125
126 # Convert str to bytes.
127 if isinstance(bundle_content, str):
128 bundle_content = bundle_content.encode('utf-8')
129
130 if settings.BUNDLE_BROTLI_COMPRESS:
131 content_file = ContentFile(brotli.compress(bundle_content))
132 content_file.content_encoding = 'br'
133 else:
134 content_file = ContentFile(bundle_content)
135
136 default_storage.save(filename, content_file)
137 self.stdout.write(self.style.SUCCESS('Writing bundle {}'.format(filename)))
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/management/commands/generate_bundles.py b/snippets/base/management/commands/generate_bundles.py
--- a/snippets/base/management/commands/generate_bundles.py
+++ b/snippets/base/management/commands/generate_bundles.py
@@ -34,7 +34,10 @@
self.stdout.write(
'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])
)
- total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])
+ total_jobs = Job.objects.filter(
+ Q(snippet__modified__gte=options['timestamp']) |
+ Q(distribution__distributionbundle__modified__gte=options['timestamp'])
+ ).distinct()
if not total_jobs:
self.stdout.write('Nothing to do…')
| {"golden_diff": "diff --git a/snippets/base/management/commands/generate_bundles.py b/snippets/base/management/commands/generate_bundles.py\n--- a/snippets/base/management/commands/generate_bundles.py\n+++ b/snippets/base/management/commands/generate_bundles.py\n@@ -34,7 +34,10 @@\n self.stdout.write(\n 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])\n )\n- total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])\n+ total_jobs = Job.objects.filter(\n+ Q(snippet__modified__gte=options['timestamp']) |\n+ Q(distribution__distributionbundle__modified__gte=options['timestamp'])\n+ ).distinct()\n \n if not total_jobs:\n self.stdout.write('Nothing to do\u2026')\n", "issue": "Re-generate all bundles with Distribution changes after Timestamp\n\n", "before_files": [{"content": "import os\nimport json\nimport itertools\nfrom datetime import datetime\n\nimport brotli\nfrom product_details import product_details\n\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom django.core.files.storage import default_storage\n\nfrom snippets.base.models import DistributionBundle, Job\n\n\nclass Command(BaseCommand):\n args = '(no args)'\n help = 'Generate bundles'\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\n '--timestamp',\n help='Parse Jobs last modified after <timestamp>',\n )\n\n def handle(self, *args, **options):\n if not options['timestamp']:\n self.stdout.write('Generating all bundles.')\n total_jobs = Job.objects.all()\n else:\n self.stdout.write(\n 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])\n )\n total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])\n\n if not total_jobs:\n self.stdout.write('Nothing to do\u2026')\n return\n\n self.stdout.write('Processing bundles\u2026')\n\n combinations_to_process = set(\n itertools.chain.from_iterable(\n itertools.product(\n job.channels,\n job.snippet.locale.code.strip(',').split(',')\n )\n for job in total_jobs\n )\n )\n distribution_bundles_to_process = DistributionBundle.objects.filter(\n distributions__jobs__in=total_jobs\n ).distinct().order_by('id')\n\n for distribution_bundle in distribution_bundles_to_process:\n distributions = distribution_bundle.distributions.all()\n\n for channel, locale in combinations_to_process:\n additional_jobs = []\n if channel == 'nightly' and settings.NIGHTLY_INCLUDES_RELEASE:\n additional_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(**{\n 'targets__on_release': True,\n 'distribution__in': distributions,\n })\n\n channel_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(\n Q(**{\n 'targets__on_{}'.format(channel): True,\n 'distribution__in': distributions,\n }))\n\n all_jobs = Job.objects.filter(\n Q(id__in=additional_jobs) | Q(id__in=channel_jobs)\n )\n\n locales_to_process = [\n key.lower() for key in product_details.languages.keys()\n if key.lower().startswith(locale)\n ]\n\n for locale_to_process in locales_to_process:\n filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(\n channel=channel,\n locale=locale_to_process,\n distribution=distribution_bundle.code_name,\n )\n filename = os.path.join(settings.MEDIA_BUNDLES_PREGEN_ROOT, filename)\n full_locale = ',{},'.format(locale_to_process.lower())\n splitted_locale = ',{},'.format(locale_to_process.lower().split('-', 1)[0])\n bundle_jobs = all_jobs.filter(\n 
Q(snippet__locale__code__contains=splitted_locale) |\n Q(snippet__locale__code__contains=full_locale)).distinct()\n\n # If DistributionBundle is not enabled, or if there are no\n # Published Jobs for the channel / locale / distribution\n # combination, delete the current bundle file if it exists.\n if not distribution_bundle.enabled or not bundle_jobs.exists():\n if default_storage.exists(filename):\n self.stdout.write('Removing {}'.format(filename))\n default_storage.delete(filename)\n continue\n\n data = []\n channel_job_ids = list(channel_jobs.values_list('id', flat=True))\n for job in bundle_jobs:\n if job.id in channel_job_ids:\n render = job.render()\n else:\n render = job.render(always_eval_to_false=True)\n data.append(render)\n\n bundle_content = json.dumps({\n 'messages': data,\n 'metadata': {\n 'generated_at': datetime.utcnow().isoformat(),\n 'number_of_snippets': len(data),\n 'channel': channel,\n }\n })\n\n # Convert str to bytes.\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if settings.BUNDLE_BROTLI_COMPRESS:\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(filename, content_file)\n self.stdout.write(self.style.SUCCESS('Writing bundle {}'.format(filename)))\n", "path": "snippets/base/management/commands/generate_bundles.py"}], "after_files": [{"content": "import os\nimport json\nimport itertools\nfrom datetime import datetime\n\nimport brotli\nfrom product_details import product_details\n\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom django.core.files.storage import default_storage\n\nfrom snippets.base.models import DistributionBundle, Job\n\n\nclass Command(BaseCommand):\n args = '(no args)'\n help = 'Generate bundles'\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\n '--timestamp',\n help='Parse Jobs last modified after <timestamp>',\n )\n\n def handle(self, *args, **options):\n if not options['timestamp']:\n self.stdout.write('Generating all bundles.')\n total_jobs = Job.objects.all()\n else:\n self.stdout.write(\n 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])\n )\n total_jobs = Job.objects.filter(\n Q(snippet__modified__gte=options['timestamp']) |\n Q(distribution__distributionbundle__modified__gte=options['timestamp'])\n ).distinct()\n\n if not total_jobs:\n self.stdout.write('Nothing to do\u2026')\n return\n\n self.stdout.write('Processing bundles\u2026')\n\n combinations_to_process = set(\n itertools.chain.from_iterable(\n itertools.product(\n job.channels,\n job.snippet.locale.code.strip(',').split(',')\n )\n for job in total_jobs\n )\n )\n distribution_bundles_to_process = DistributionBundle.objects.filter(\n distributions__jobs__in=total_jobs\n ).distinct().order_by('id')\n\n for distribution_bundle in distribution_bundles_to_process:\n distributions = distribution_bundle.distributions.all()\n\n for channel, locale in combinations_to_process:\n additional_jobs = []\n if channel == 'nightly' and settings.NIGHTLY_INCLUDES_RELEASE:\n additional_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(**{\n 'targets__on_release': True,\n 'distribution__in': distributions,\n })\n\n channel_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(\n Q(**{\n 'targets__on_{}'.format(channel): 
True,\n 'distribution__in': distributions,\n }))\n\n all_jobs = Job.objects.filter(\n Q(id__in=additional_jobs) | Q(id__in=channel_jobs)\n )\n\n locales_to_process = [\n key.lower() for key in product_details.languages.keys()\n if key.lower().startswith(locale)\n ]\n\n for locale_to_process in locales_to_process:\n filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(\n channel=channel,\n locale=locale_to_process,\n distribution=distribution_bundle.code_name,\n )\n filename = os.path.join(settings.MEDIA_BUNDLES_PREGEN_ROOT, filename)\n full_locale = ',{},'.format(locale_to_process.lower())\n splitted_locale = ',{},'.format(locale_to_process.lower().split('-', 1)[0])\n bundle_jobs = all_jobs.filter(\n Q(snippet__locale__code__contains=splitted_locale) |\n Q(snippet__locale__code__contains=full_locale)).distinct()\n\n # If DistributionBundle is not enabled, or if there are no\n # Published Jobs for the channel / locale / distribution\n # combination, delete the current bundle file if it exists.\n if not distribution_bundle.enabled or not bundle_jobs.exists():\n if default_storage.exists(filename):\n self.stdout.write('Removing {}'.format(filename))\n default_storage.delete(filename)\n continue\n\n data = []\n channel_job_ids = list(channel_jobs.values_list('id', flat=True))\n for job in bundle_jobs:\n if job.id in channel_job_ids:\n render = job.render()\n else:\n render = job.render(always_eval_to_false=True)\n data.append(render)\n\n bundle_content = json.dumps({\n 'messages': data,\n 'metadata': {\n 'generated_at': datetime.utcnow().isoformat(),\n 'number_of_snippets': len(data),\n 'channel': channel,\n }\n })\n\n # Convert str to bytes.\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if settings.BUNDLE_BROTLI_COMPRESS:\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(filename, content_file)\n self.stdout.write(self.style.SUCCESS('Writing bundle {}'.format(filename)))\n", "path": "snippets/base/management/commands/generate_bundles.py"}]} | 1,533 | 175 |
gh_patches_debug_147 | rasdani/github-patches | git_diff | encode__httpx-868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.12.0 PyPI wheel contains both public- and private-name modules
The following works in httpx 0.11.1:
```python
In [1]: import httpx
...: from httpx.exceptions import InvalidURL
In [2]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
```
In 0.12.0 the exception isn't caught:
```python
In [1]: import httpx
...: from httpx.exceptions import InvalidURL
In [2]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
---------------------------------------------------------------------------
InvalidURL Traceback (most recent call last)
<ipython-input-2-87135a63c42c> in <module>
1 try:
----> 2 httpx.get("foo.bar")
3 except InvalidURL:
4 pass
5
~/.venv/lib/python3.7/site-packages/httpx/_api.py in get(url, params, headers, cookies, auth, allow_redirects, cert, verify, timeout, trust_env)
166 verify=verify,
167 timeout=timeout,
--> 168 trust_env=trust_env,
169 )
170
~/.venv/lib/python3.7/site-packages/httpx/_api.py in request(method, url, params, data, files, json, headers, cookies, auth, timeout, allow_redirects, verify, cert, trust_env)
92 cookies=cookies,
93 auth=auth,
---> 94 allow_redirects=allow_redirects,
95 )
96
~/.venv/lib/python3.7/site-packages/httpx/_client.py in request(self, method, url, data, files, json, params, headers, cookies, auth, allow_redirects, timeout)
566 params=params,
567 headers=headers,
--> 568 cookies=cookies,
569 )
570 return self.send(
~/.venv/lib/python3.7/site-packages/httpx/_client.py in build_request(self, method, url, data, files, json, params, headers, cookies)
196 Build and return a request instance.
197 """
--> 198 url = self.merge_url(url)
199 headers = self.merge_headers(headers)
200 cookies = self.merge_cookies(cookies)
~/.venv/lib/python3.7/site-packages/httpx/_client.py in merge_url(self, url)
216 to create the URL used for the outgoing request.
217 """
--> 218 url = self.base_url.join(relative_url=url)
219 if url.scheme == "http" and hstspreload.in_hsts_preload(url.host):
220 port = None if url.port == 80 else url.port
~/.venv/lib/python3.7/site-packages/httpx/_models.py in join(self, relative_url)
227 """
228 if self.is_relative_url:
--> 229 return URL(relative_url)
230
231 # We drop any fragment portion, because RFC 3986 strictly
~/.venv/lib/python3.7/site-packages/httpx/_models.py in __init__(self, url, allow_relative, params)
104 if not allow_relative:
105 if not self.scheme:
--> 106 raise InvalidURL("No scheme included in URL.")
107 if not self.host:
108 raise InvalidURL("No host included in URL.")
InvalidURL: No scheme included in URL.
```
This works though:
```python
In [3]: import httpx
...: from httpx._exceptions import InvalidURL
In [4]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/__version__.py`
Content:
```
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.12.0"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.12.0"
+__version__ = "0.12.1"
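
The version bump above is only the visible part of the fix; the underlying problem was a packaging one: per the issue title, the 0.12.0 wheel shipped both the old public modules (`httpx/exceptions.py`) and the new private ones (`httpx/_exceptions.py`), so `InvalidURL` existed as two distinct class objects and an `except` on one never matched the other. A toy reproduction of that identity mismatch:

```python
# Two same-named classes defined in different modules are unrelated types,
# which is exactly what the duplicated wheel contents produced.
class PrivateInvalidURL(Exception):  # stands in for httpx._exceptions.InvalidURL
    pass


class PublicInvalidURL(Exception):   # stands in for httpx.exceptions.InvalidURL
    pass


try:
    raise PrivateInvalidURL("No scheme included in URL.")
except PublicInvalidURL:
    print("caught")                          # never runs: except matches by class identity
except PrivateInvalidURL:
    print("escaped the public handler")      # this branch runs
```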
| {"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.12.0\"\n+__version__ = \"0.12.1\"\n", "issue": "0.12.0 PyPI wheel contains both public- and private-name modules\nThe following works in httpx 0.11.1:\r\n\r\n```python\r\nIn [1]: import httpx \r\n ...: from httpx.exceptions import InvalidURL \r\n\r\nIn [2]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n```\r\n\r\nIn 0.12.0 the exception isn't caught:\r\n\r\n```python\r\nIn [1]: import httpx \r\n ...: from httpx.exceptions import InvalidURL \r\n\r\nIn [2]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n---------------------------------------------------------------------------\r\nInvalidURL Traceback (most recent call last)\r\n<ipython-input-2-87135a63c42c> in <module>\r\n 1 try:\r\n----> 2 httpx.get(\"foo.bar\")\r\n 3 except InvalidURL:\r\n 4 pass\r\n 5 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_api.py in get(url, params, headers, cookies, auth, allow_redirects, cert, verify, timeout, trust_env)\r\n 166 verify=verify,\r\n 167 timeout=timeout,\r\n--> 168 trust_env=trust_env,\r\n 169 )\r\n 170 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_api.py in request(method, url, params, data, files, json, headers, cookies, auth, timeout, allow_redirects, verify, cert, trust_env)\r\n 92 cookies=cookies,\r\n 93 auth=auth,\r\n---> 94 allow_redirects=allow_redirects,\r\n 95 )\r\n 96 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in request(self, method, url, data, files, json, params, headers, cookies, auth, allow_redirects, timeout)\r\n 566 params=params,\r\n 567 headers=headers,\r\n--> 568 cookies=cookies,\r\n 569 )\r\n 570 return self.send(\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in build_request(self, method, url, data, files, json, params, headers, cookies)\r\n 196 Build and return a request instance.\r\n 197 \"\"\"\r\n--> 198 url = self.merge_url(url)\r\n 199 headers = self.merge_headers(headers)\r\n 200 cookies = self.merge_cookies(cookies)\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in merge_url(self, url)\r\n 216 to create the URL used for the outgoing request.\r\n 217 \"\"\"\r\n--> 218 url = self.base_url.join(relative_url=url)\r\n 219 if url.scheme == \"http\" and hstspreload.in_hsts_preload(url.host):\r\n 220 port = None if url.port == 80 else url.port\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_models.py in join(self, relative_url)\r\n 227 \"\"\"\r\n 228 if self.is_relative_url:\r\n--> 229 return URL(relative_url)\r\n 230 \r\n 231 # We drop any fragment portion, because RFC 3986 strictly\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_models.py in __init__(self, url, allow_relative, params)\r\n 104 if not allow_relative:\r\n 105 if not self.scheme:\r\n--> 106 raise InvalidURL(\"No scheme included in URL.\")\r\n 107 if not self.host:\r\n 108 raise InvalidURL(\"No host included in URL.\")\r\n\r\nInvalidURL: No scheme included in URL.\r\n```\r\n\r\nThis works though:\r\n\r\n```python\r\nIn [3]: import httpx \r\n ...: from httpx._exceptions import InvalidURL \r\n\r\nIn [4]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n```\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP 
client, for Python 3.\"\n__version__ = \"0.12.0\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.12.1\"\n", "path": "httpx/__version__.py"}]} | 1,272 | 96 |
gh_patches_debug_6543 | rasdani/github-patches | git_diff | saleor__saleor-10987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to filter customers with 0 orders
### **Steps to reproduce the problem:**
```graphql
query Customers{
customers(filter: {numberOfOrders: {lte: 0, gte: 0}}, first: 10){
edges{
node{
id
email
orders{
totalCount
}
}
}
totalCount
}
}
```
### **Current result:**
Backend returns all customers instead of those with 0 orders
### **Expected result:**
Return all customers with 0 orders
### **Screenshots:**
### **System information:**
### **Environment:**
master.staging core v3.8.0-a
### **Additional info/links:**
https://master.staging.saleor.cloud/dashboard/customers/?asc=true&sort=name&numberOfOrdersFrom=0&numberOfOrdersTo=0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/utils/filters.py`
Content:
```
1 from django.utils import timezone
2
3 from ..core.enums import ReportingPeriod
4
5
6 def reporting_period_to_date(period):
7 now = timezone.now()
8 if period == ReportingPeriod.TODAY:
9 start_date = now.replace(hour=0, minute=0, second=0, microsecond=0)
10 elif period == ReportingPeriod.THIS_MONTH:
11 start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
12 else:
13 raise ValueError("Unknown period: %s" % period)
14 return start_date
15
16
17 def filter_by_period(queryset, period, field_name):
18 start_date = reporting_period_to_date(period)
19 return queryset.filter(**{"%s__gte" % field_name: start_date})
20
21
22 def filter_range_field(qs, field, value):
23 gte, lte = value.get("gte"), value.get("lte")
24 if gte:
25 lookup = {f"{field}__gte": gte}
26 qs = qs.filter(**lookup)
27 if lte:
28 lookup = {f"{field}__lte": lte}
29 qs = qs.filter(**lookup)
30 return qs
31
32
33 def filter_by_id(object_type):
34 from . import resolve_global_ids_to_primary_keys
35
36 def inner(qs, _, value):
37 if not value:
38 return qs
39 _, obj_pks = resolve_global_ids_to_primary_keys(value, object_type)
40 return qs.filter(id__in=obj_pks)
41
42 return inner
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/graphql/utils/filters.py b/saleor/graphql/utils/filters.py
--- a/saleor/graphql/utils/filters.py
+++ b/saleor/graphql/utils/filters.py
@@ -21,10 +21,10 @@
def filter_range_field(qs, field, value):
gte, lte = value.get("gte"), value.get("lte")
- if gte:
+ if gte is not None:
lookup = {f"{field}__gte": gte}
qs = qs.filter(**lookup)
- if lte:
+ if lte is not None:
lookup = {f"{field}__lte": lte}
qs = qs.filter(**lookup)
return qs
| {"golden_diff": "diff --git a/saleor/graphql/utils/filters.py b/saleor/graphql/utils/filters.py\n--- a/saleor/graphql/utils/filters.py\n+++ b/saleor/graphql/utils/filters.py\n@@ -21,10 +21,10 @@\n \n def filter_range_field(qs, field, value):\n gte, lte = value.get(\"gte\"), value.get(\"lte\")\n- if gte:\n+ if gte is not None:\n lookup = {f\"{field}__gte\": gte}\n qs = qs.filter(**lookup)\n- if lte:\n+ if lte is not None:\n lookup = {f\"{field}__lte\": lte}\n qs = qs.filter(**lookup)\n return qs\n", "issue": "Unable to filter customers with 0 orders\n### **Steps to reproduce the problem:**\n```graphql\nquery Customers{\n customers(filter: {numberOfOrders: {lte: 0, gte: 0}}, first: 10){\n edges{\n node{\n id\n email\n orders{\n totalCount\n }\n }\n }\n totalCount\n }\n}\n```\n\n### **Current result:**\nBackend returns all customers instead of those with 0 orders\n\n### **Expected result:**\nReturn all customers with 0 orders\n\n### **Screenshots:**\n\n### **System information:**\n\n### **Environment:**\nmaster.staging core v3.8.0-a\n\n### **Additional info/links:**\nhttps://master.staging.saleor.cloud/dashboard/customers/?asc=true&sort=name&numberOfOrdersFrom=0&numberOfOrdersTo=0\n", "before_files": [{"content": "from django.utils import timezone\n\nfrom ..core.enums import ReportingPeriod\n\n\ndef reporting_period_to_date(period):\n now = timezone.now()\n if period == ReportingPeriod.TODAY:\n start_date = now.replace(hour=0, minute=0, second=0, microsecond=0)\n elif period == ReportingPeriod.THIS_MONTH:\n start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n else:\n raise ValueError(\"Unknown period: %s\" % period)\n return start_date\n\n\ndef filter_by_period(queryset, period, field_name):\n start_date = reporting_period_to_date(period)\n return queryset.filter(**{\"%s__gte\" % field_name: start_date})\n\n\ndef filter_range_field(qs, field, value):\n gte, lte = value.get(\"gte\"), value.get(\"lte\")\n if gte:\n lookup = {f\"{field}__gte\": gte}\n qs = qs.filter(**lookup)\n if lte:\n lookup = {f\"{field}__lte\": lte}\n qs = qs.filter(**lookup)\n return qs\n\n\ndef filter_by_id(object_type):\n from . import resolve_global_ids_to_primary_keys\n\n def inner(qs, _, value):\n if not value:\n return qs\n _, obj_pks = resolve_global_ids_to_primary_keys(value, object_type)\n return qs.filter(id__in=obj_pks)\n\n return inner\n", "path": "saleor/graphql/utils/filters.py"}], "after_files": [{"content": "from django.utils import timezone\n\nfrom ..core.enums import ReportingPeriod\n\n\ndef reporting_period_to_date(period):\n now = timezone.now()\n if period == ReportingPeriod.TODAY:\n start_date = now.replace(hour=0, minute=0, second=0, microsecond=0)\n elif period == ReportingPeriod.THIS_MONTH:\n start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n else:\n raise ValueError(\"Unknown period: %s\" % period)\n return start_date\n\n\ndef filter_by_period(queryset, period, field_name):\n start_date = reporting_period_to_date(period)\n return queryset.filter(**{\"%s__gte\" % field_name: start_date})\n\n\ndef filter_range_field(qs, field, value):\n gte, lte = value.get(\"gte\"), value.get(\"lte\")\n if gte is not None:\n lookup = {f\"{field}__gte\": gte}\n qs = qs.filter(**lookup)\n if lte is not None:\n lookup = {f\"{field}__lte\": lte}\n qs = qs.filter(**lookup)\n return qs\n\n\ndef filter_by_id(object_type):\n from . 
import resolve_global_ids_to_primary_keys\n\n def inner(qs, _, value):\n if not value:\n return qs\n _, obj_pks = resolve_global_ids_to_primary_keys(value, object_type)\n return qs.filter(id__in=obj_pks)\n\n return inner\n", "path": "saleor/graphql/utils/filters.py"}]} | 843 | 165 |
gh_patches_debug_29715 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-4090 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Detecting and alerting of duplication keys/components/entries in YAML file
### Is your feature request related to a problem? Please describe
In release 1.3.11 it was found that a PR to update the [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) had duplicated component names.
This wastes CI resources by rebuilding the duplicated components.
### Describe the solution you'd like
We want a check that detects duplicate entries based on keys/components/names and fails the GitHub check when duplicates are found.
### Describe alternatives you've considered
Manually check for duplicate values
### Acceptance Criteria
* The manifest check should fail at the CI level for components with duplicate components.name values in opensearch and opensearch-dashboards manifests as well as in test manifests. See [what manifests are](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ci_workflow/ci_manifests.py`
Content:
```
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8
9 import re
10 from io import TextIOWrapper
11 from typing import Type, Union
12
13 from ci_workflow.ci_args import CiArgs
14 from ci_workflow.ci_input_manifest import CiInputManifest
15 from ci_workflow.ci_test_manifest import CiTestManifest
16
17
18 class CiManifests:
19 @staticmethod
20 def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:
21 if re.search("-test.yml$", filename):
22 return CiTestManifest
23 else:
24 return CiInputManifest
25
26 @classmethod
27 def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:
28 return cls.__klass(file.name)(file, args)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py
--- a/src/ci_workflow/ci_manifests.py
+++ b/src/ci_workflow/ci_manifests.py
@@ -7,9 +7,12 @@
import re
+from collections import Counter
from io import TextIOWrapper
from typing import Type, Union
+import yaml
+
from ci_workflow.ci_args import CiArgs
from ci_workflow.ci_input_manifest import CiInputManifest
from ci_workflow.ci_test_manifest import CiTestManifest
@@ -23,6 +26,29 @@
else:
return CiInputManifest
+ @staticmethod
+ def __get_duplicate_component_names(count_component_names: Counter) -> list:
+ duplicate_component_names = []
+ for component_name, count in count_component_names.items():
+ if count > 1:
+ duplicate_component_names.append(component_name)
+ return duplicate_component_names
+
+ @staticmethod
+ def __check_duplicate_component_names(file: TextIOWrapper) -> None:
+ yaml_dict = yaml.safe_load(file)
+ component_names = []
+ for component in yaml_dict['components']:
+ component_names.append(component['name'])
+ count_component_names = Counter(component_names)
+
+ if set(count_component_names.values()) != set([1]):
+ duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)
+ duplicate_component_names_string = ', '.join(duplicate_component_names)
+ raise ValueError(f"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. ")
+ file.seek(0)
+
@classmethod
def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:
+ cls.__check_duplicate_component_names(file)
return cls.__klass(file.name)(file, args)
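As a quick aside, the core of the added check is just counting names and complaining about anything seen more than once. A self-contained sketch using only the standard library follows; the manifest dict below is invented for illustration:

```python
from collections import Counter

manifest = {"components": [{"name": "common-utils"},
                           {"name": "job-scheduler"},
                           {"name": "common-utils"}]}  # duplicate on purpose

counts = Counter(c["name"] for c in manifest["components"])
duplicates = [name for name, n in counts.items() if n > 1]
if duplicates:
    raise ValueError(f"Found {', '.join(duplicates)} as duplicate component(s)")
```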
| {"golden_diff": "diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py\n--- a/src/ci_workflow/ci_manifests.py\n+++ b/src/ci_workflow/ci_manifests.py\n@@ -7,9 +7,12 @@\n \n \n import re\n+from collections import Counter\n from io import TextIOWrapper\n from typing import Type, Union\n \n+import yaml\n+\n from ci_workflow.ci_args import CiArgs\n from ci_workflow.ci_input_manifest import CiInputManifest\n from ci_workflow.ci_test_manifest import CiTestManifest\n@@ -23,6 +26,29 @@\n else:\n return CiInputManifest\n \n+ @staticmethod\n+ def __get_duplicate_component_names(count_component_names: Counter) -> list:\n+ duplicate_component_names = []\n+ for component_name, count in count_component_names.items():\n+ if count > 1:\n+ duplicate_component_names.append(component_name)\n+ return duplicate_component_names\n+\n+ @staticmethod\n+ def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n+ yaml_dict = yaml.safe_load(file)\n+ component_names = []\n+ for component in yaml_dict['components']:\n+ component_names.append(component['name'])\n+ count_component_names = Counter(component_names)\n+\n+ if set(count_component_names.values()) != set([1]):\n+ duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n+ duplicate_component_names_string = ', '.join(duplicate_component_names)\n+ raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n+ file.seek(0)\n+\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n+ cls.__check_duplicate_component_names(file)\n return cls.__klass(file.name)(file, args)\n", "issue": "Detecting and alerting of duplication keys/components/entries in YAML file\n### Is your feature request related to a problem? Please describe\r\n\r\nit was found in release 1.3.11 , a PR to update [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) has duplicated components name.\r\nIt would cause the resource wasted on CI to rebuild the duplicated components \r\n\r\n### Describe the solution you'd like\r\n\r\nWe want to have a check to detect if there is any duplication entries based on keys/components/names and probably fail the GitHub check\r\n\r\n### Describe alternatives you've considered\r\n\r\nManually check for duplicate values\r\n\r\n### Acceptance Criteria\r\n* The manifest check should fail at CI level for components with duplicate components.name values in opensearch and opensearch-dashboard as well as test manifests. 
See what are [manifests](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests)\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\n\nimport re\nfrom io import TextIOWrapper\nfrom typing import Type, Union\n\nfrom ci_workflow.ci_args import CiArgs\nfrom ci_workflow.ci_input_manifest import CiInputManifest\nfrom ci_workflow.ci_test_manifest import CiTestManifest\n\n\nclass CiManifests:\n @staticmethod\n def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:\n if re.search(\"-test.yml$\", filename):\n return CiTestManifest\n else:\n return CiInputManifest\n\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n return cls.__klass(file.name)(file, args)\n", "path": "src/ci_workflow/ci_manifests.py"}], "after_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\n\nimport re\nfrom collections import Counter\nfrom io import TextIOWrapper\nfrom typing import Type, Union\n\nimport yaml\n\nfrom ci_workflow.ci_args import CiArgs\nfrom ci_workflow.ci_input_manifest import CiInputManifest\nfrom ci_workflow.ci_test_manifest import CiTestManifest\n\n\nclass CiManifests:\n @staticmethod\n def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:\n if re.search(\"-test.yml$\", filename):\n return CiTestManifest\n else:\n return CiInputManifest\n\n @staticmethod\n def __get_duplicate_component_names(count_component_names: Counter) -> list:\n duplicate_component_names = []\n for component_name, count in count_component_names.items():\n if count > 1:\n duplicate_component_names.append(component_name)\n return duplicate_component_names\n\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n count_component_names = Counter(component_names)\n\n if set(count_component_names.values()) != set([1]):\n duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n duplicate_component_names_string = ', '.join(duplicate_component_names)\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n file.seek(0)\n\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n cls.__check_duplicate_component_names(file)\n return cls.__klass(file.name)(file, args)\n", "path": "src/ci_workflow/ci_manifests.py"}]} | 750 | 414 |
gh_patches_debug_47463 | rasdani/github-patches | git_diff | bokeh__bokeh-5968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transform docstring ends abruptly
```
Bases: bokeh.model.Model
Base class for Transform models that represent a computation to be carried out on the client-side.
JavaScript implementations should implement the following methods:
```
<img width="879" alt="screen shot 2017-02-17 at 2 43 31 am" src="https://cloud.githubusercontent.com/assets/1796208/23058499/e52042e8-f4ba-11e6-8f8a-596498e00084.png">
Should add the methods that need to be implemented.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/transforms.py`
Content:
```
1 '''
2
3 '''
4 from __future__ import absolute_import
5
6 from ..core.enums import StepMode, JitterRandomDistribution
7 from ..core.has_props import abstract
8 from ..core.properties import Bool, Either, Enum, Float, Instance, Seq, String
9 from ..model import Model
10
11 from .sources import ColumnarDataSource
12
13 @abstract
14 class Transform(Model):
15 ''' Base class for ``Transform`` models that represent a computation
16 to be carried out on the client-side.
17
18 JavaScript implementations should implement the following methods:
19
20 .. code-block: coffeescript
21
22 compute: (x) ->
23 # compute the transform of a single value
24
25 v_compute: (xs) ->
26 # compute the transform of an array of values
27
28 '''
29 pass
30
31
32 class Jitter(Transform):
33 ''' Apply either a uniform or normally sampled random jitter to data.
34
35 '''
36
37
38 mean = Float(default=0, help="""
39 The central value for the random sample
40 """)
41
42 width = Float(default=1, help="""
43 The width (absolute for uniform distribution and sigma for the normal distribution) of the random sample.
44 """)
45
46 distribution = Enum(JitterRandomDistribution, default='uniform', help="""
47 The random distribution upon which to pull the random scatter
48 """)
49
50 @abstract
51 class Interpolator(Transform):
52 ''' Base class for interpolator transforms.
53
54 Interpolators return the value of a function which has been evaluated
55 between specified (x, y) pairs of data. As an example, if two control
56 point pairs were provided to the interpolator, a linear interpolaction
57 at a specific value of 'x' would result in the value of 'y' which existed
58 on the line conneting the two control points.
59
60 The control point pairs for the interpolators can be specified through either
61
62 * A literal sequence of values:
63
64 .. code-block: python
65
66 interp = Interpolator(x=[1, 2, 3, 4, 5], y=[2, 5, 10, 12, 16])
67
68 * or a pair of columns defined in a `ColumnDataSource` object:
69
70 .. code-block: python
71
72 interp = Interpolator(x="year", y="earnings", data=jewlery_prices))
73
74
75 This is the base class and is not intended to end use. Please see the
76 documentation for the final derived classes (Jitter, LineraInterpolator,
77 StepInterpolator) for mor information on their specific methods of
78 interpolation.
79
80 '''
81 x = Either(String, Seq(Float), help="""
82 Independant coordiante denoting the location of a point.
83 """)
84
85 y = Either(String, Seq(Float), help="""
86 Dependant coordinate denoting the value of a point at a location.
87 """)
88
89 data = Instance(ColumnarDataSource, help="""
90 Data which defines the source for the named columns if a string is passed to either the ``x`` or ``y`` parameters.
91 """)
92
93 clip = Bool(True, help="""
94 Determine if the interpolation should clip the result to include only values inside its predefined range.
95 If this is set to False, it will return the most value of the closest point.
96 """)
97
98 # Define an initialization routine to do some cross checking of input values
99 def __init__(self, **kwargs):
100 super(Interpolator, self).__init__(**kwargs)
101
102
103 class LinearInterpolator(Interpolator):
104 ''' Compute a linear interpolation between the control points provided through
105 the ``x``, ``y``, and ``data`` parameters.
106
107 '''
108 pass
109
110
111 class StepInterpolator(Interpolator):
112 ''' Compute a step-wise interpolation between the points provided through
113 the ``x``, ``y``, and ``data`` parameters.
114
115 '''
116
117 mode = Enum(StepMode, default="after", help="""
118 Adjust the behavior of the returned value in relation to the control points. The parameter can assume one of three values:
119
120 * ``after`` (default): Assume the y-value associated with the nearest x-value which is less than or equal to the point to transform.
121 * ``before``: Assume the y-value associated with the nearest x-value which is greater than the point to transform.
122 * ``center``: Assume the y-value associated with the nearest x-value to the point to transform.
123 """)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/transforms.py b/bokeh/models/transforms.py
--- a/bokeh/models/transforms.py
+++ b/bokeh/models/transforms.py
@@ -19,11 +19,11 @@
.. code-block: coffeescript
- compute: (x) ->
- # compute the transform of a single value
+ compute: (x) ->
+ # compute the transform of a single value
- v_compute: (xs) ->
- # compute the transform of an array of values
+ v_compute: (xs) ->
+ # compute the transform of an array of values
'''
pass
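One plausible reading of why the rendered docstring "ends abruptly": with a single colon, `.. code-block: coffeescript` is not valid directive syntax, so docutils parses it as a reST comment, and comments swallow the indented lines that follow. The sketch below shows the conventional two-colon layout for comparison; it is illustrative only and is not the patch itself, which only reindents the block:

```python
# Illustrative docstring layout using the standard two-colon directive form.
EXAMPLE_DOCSTRING = """\
JavaScript implementations should implement the following methods:

.. code-block:: coffeescript

    compute: (x) ->
        # compute the transform of a single value

    v_compute: (xs) ->
        # compute the transform of an array of values
"""
```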
| {"golden_diff": "diff --git a/bokeh/models/transforms.py b/bokeh/models/transforms.py\n--- a/bokeh/models/transforms.py\n+++ b/bokeh/models/transforms.py\n@@ -19,11 +19,11 @@\n \n .. code-block: coffeescript\n \n- compute: (x) ->\n- # compute the transform of a single value\n+ compute: (x) ->\n+ # compute the transform of a single value\n \n- v_compute: (xs) ->\n- # compute the transform of an array of values\n+ v_compute: (xs) ->\n+ # compute the transform of an array of values\n \n '''\n pass\n", "issue": "Transform docstring ends abruptly\n```\r\n Bases: bokeh.model.Model\r\n Base class for Transform models that represent a computation to be carried out on the client-side.\r\n JavaScript implementations should implement the following methods:\r\n```\r\n<img width=\"879\" alt=\"screen shot 2017-02-17 at 2 43 31 am\" src=\"https://cloud.githubusercontent.com/assets/1796208/23058499/e52042e8-f4ba-11e6-8f8a-596498e00084.png\">\r\n\r\nShould add the methods that need to be implemented.\r\n\n", "before_files": [{"content": "'''\n\n'''\nfrom __future__ import absolute_import\n\nfrom ..core.enums import StepMode, JitterRandomDistribution\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Either, Enum, Float, Instance, Seq, String\nfrom ..model import Model\n\nfrom .sources import ColumnarDataSource\n\n@abstract\nclass Transform(Model):\n ''' Base class for ``Transform`` models that represent a computation\n to be carried out on the client-side.\n\n JavaScript implementations should implement the following methods:\n\n .. code-block: coffeescript\n\n compute: (x) ->\n # compute the transform of a single value\n\n v_compute: (xs) ->\n # compute the transform of an array of values\n\n '''\n pass\n\n\nclass Jitter(Transform):\n ''' Apply either a uniform or normally sampled random jitter to data.\n\n '''\n\n\n mean = Float(default=0, help=\"\"\"\n The central value for the random sample\n \"\"\")\n\n width = Float(default=1, help=\"\"\"\n The width (absolute for uniform distribution and sigma for the normal distribution) of the random sample.\n \"\"\")\n\n distribution = Enum(JitterRandomDistribution, default='uniform', help=\"\"\"\n The random distribution upon which to pull the random scatter\n \"\"\")\n\n@abstract\nclass Interpolator(Transform):\n ''' Base class for interpolator transforms.\n\n Interpolators return the value of a function which has been evaluated\n between specified (x, y) pairs of data. As an example, if two control\n point pairs were provided to the interpolator, a linear interpolaction\n at a specific value of 'x' would result in the value of 'y' which existed\n on the line conneting the two control points.\n\n The control point pairs for the interpolators can be specified through either\n\n * A literal sequence of values:\n\n .. code-block: python\n\n interp = Interpolator(x=[1, 2, 3, 4, 5], y=[2, 5, 10, 12, 16])\n\n * or a pair of columns defined in a `ColumnDataSource` object:\n\n .. code-block: python\n\n interp = Interpolator(x=\"year\", y=\"earnings\", data=jewlery_prices))\n\n\n This is the base class and is not intended to end use. 
Please see the\n documentation for the final derived classes (Jitter, LineraInterpolator,\n StepInterpolator) for mor information on their specific methods of\n interpolation.\n\n '''\n x = Either(String, Seq(Float), help=\"\"\"\n Independant coordiante denoting the location of a point.\n \"\"\")\n\n y = Either(String, Seq(Float), help=\"\"\"\n Dependant coordinate denoting the value of a point at a location.\n \"\"\")\n\n data = Instance(ColumnarDataSource, help=\"\"\"\n Data which defines the source for the named columns if a string is passed to either the ``x`` or ``y`` parameters.\n \"\"\")\n\n clip = Bool(True, help=\"\"\"\n Determine if the interpolation should clip the result to include only values inside its predefined range.\n If this is set to False, it will return the most value of the closest point.\n \"\"\")\n\n # Define an initialization routine to do some cross checking of input values\n def __init__(self, **kwargs):\n super(Interpolator, self).__init__(**kwargs)\n\n\nclass LinearInterpolator(Interpolator):\n ''' Compute a linear interpolation between the control points provided through\n the ``x``, ``y``, and ``data`` parameters.\n\n '''\n pass\n\n\nclass StepInterpolator(Interpolator):\n ''' Compute a step-wise interpolation between the points provided through\n the ``x``, ``y``, and ``data`` parameters.\n\n '''\n\n mode = Enum(StepMode, default=\"after\", help=\"\"\"\n Adjust the behavior of the returned value in relation to the control points. The parameter can assume one of three values:\n\n * ``after`` (default): Assume the y-value associated with the nearest x-value which is less than or equal to the point to transform.\n * ``before``: Assume the y-value associated with the nearest x-value which is greater than the point to transform.\n * ``center``: Assume the y-value associated with the nearest x-value to the point to transform.\n \"\"\")\n", "path": "bokeh/models/transforms.py"}], "after_files": [{"content": "'''\n\n'''\nfrom __future__ import absolute_import\n\nfrom ..core.enums import StepMode, JitterRandomDistribution\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Either, Enum, Float, Instance, Seq, String\nfrom ..model import Model\n\nfrom .sources import ColumnarDataSource\n\n@abstract\nclass Transform(Model):\n ''' Base class for ``Transform`` models that represent a computation\n to be carried out on the client-side.\n\n JavaScript implementations should implement the following methods:\n\n .. code-block: coffeescript\n\n compute: (x) ->\n # compute the transform of a single value\n\n v_compute: (xs) ->\n # compute the transform of an array of values\n\n '''\n pass\n\n\nclass Jitter(Transform):\n ''' Apply either a uniform or normally sampled random jitter to data.\n\n '''\n\n\n mean = Float(default=0, help=\"\"\"\n The central value for the random sample\n \"\"\")\n\n width = Float(default=1, help=\"\"\"\n The width (absolute for uniform distribution and sigma for the normal distribution) of the random sample.\n \"\"\")\n\n distribution = Enum(JitterRandomDistribution, default='uniform', help=\"\"\"\n The random distribution upon which to pull the random scatter\n \"\"\")\n\n@abstract\nclass Interpolator(Transform):\n ''' Base class for interpolator transforms.\n\n Interpolators return the value of a function which has been evaluated\n between specified (x, y) pairs of data. 
As an example, if two control\n point pairs were provided to the interpolator, a linear interpolaction\n at a specific value of 'x' would result in the value of 'y' which existed\n on the line conneting the two control points.\n\n The control point pairs for the interpolators can be specified through either\n\n * A literal sequence of values:\n\n .. code-block: python\n\n interp = Interpolator(x=[1, 2, 3, 4, 5], y=[2, 5, 10, 12, 16])\n\n * or a pair of columns defined in a `ColumnDataSource` object:\n\n .. code-block: python\n\n interp = Interpolator(x=\"year\", y=\"earnings\", data=jewlery_prices))\n\n\n This is the base class and is not intended to end use. Please see the\n documentation for the final derived classes (Jitter, LineraInterpolator,\n StepInterpolator) for mor information on their specific methods of\n interpolation.\n\n '''\n x = Either(String, Seq(Float), help=\"\"\"\n Independant coordiante denoting the location of a point.\n \"\"\")\n\n y = Either(String, Seq(Float), help=\"\"\"\n Dependant coordinate denoting the value of a point at a location.\n \"\"\")\n\n data = Instance(ColumnarDataSource, help=\"\"\"\n Data which defines the source for the named columns if a string is passed to either the ``x`` or ``y`` parameters.\n \"\"\")\n\n clip = Bool(True, help=\"\"\"\n Determine if the interpolation should clip the result to include only values inside its predefined range.\n If this is set to False, it will return the most value of the closest point.\n \"\"\")\n\n # Define an initialization routine to do some cross checking of input values\n def __init__(self, **kwargs):\n super(Interpolator, self).__init__(**kwargs)\n\n\nclass LinearInterpolator(Interpolator):\n ''' Compute a linear interpolation between the control points provided through\n the ``x``, ``y``, and ``data`` parameters.\n\n '''\n pass\n\n\nclass StepInterpolator(Interpolator):\n ''' Compute a step-wise interpolation between the points provided through\n the ``x``, ``y``, and ``data`` parameters.\n\n '''\n\n mode = Enum(StepMode, default=\"after\", help=\"\"\"\n Adjust the behavior of the returned value in relation to the control points. The parameter can assume one of three values:\n\n * ``after`` (default): Assume the y-value associated with the nearest x-value which is less than or equal to the point to transform.\n * ``before``: Assume the y-value associated with the nearest x-value which is greater than the point to transform.\n * ``center``: Assume the y-value associated with the nearest x-value to the point to transform.\n \"\"\")\n", "path": "bokeh/models/transforms.py"}]} | 1,625 | 149 |
gh_patches_debug_14281 | rasdani/github-patches | git_diff | litestar-org__litestar-3179 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: CORS Middleware not setting all headers as per spec
### Description
Right now, a handful of headers are only being set for the preflight request. They must be set for both the preflight and the actual request.
https://fetch.spec.whatwg.org/#http-responses
Only `Access-Control-Allow-Origin` is being set here.
https://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/middleware/cors.py#L61-L73
Only `Access-Control-Allow-Credentials` and `Access-Control-Expose-Headers` get set here, and this is what the above code uses to update headers
https://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/config/cors.py#L123-L136
This still doesn't account for:
- Access-Control-Allow-Methods
- Access-Control-Allow-Headers
which are only set on preflight, but should also be set to the actual request.
### Litestar Version
2.2.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/middleware/cors.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from litestar.datastructures import Headers, MutableScopeHeaders
6 from litestar.enums import ScopeType
7 from litestar.middleware.base import AbstractMiddleware
8
9 __all__ = ("CORSMiddleware",)
10
11
12 if TYPE_CHECKING:
13 from litestar.config.cors import CORSConfig
14 from litestar.types import ASGIApp, Message, Receive, Scope, Send
15
16
17 class CORSMiddleware(AbstractMiddleware):
18 """CORS Middleware."""
19
20 __slots__ = ("config",)
21
22 def __init__(self, app: ASGIApp, config: CORSConfig) -> None:
23 """Middleware that adds CORS validation to the application.
24
25 Args:
26 app: The ``next`` ASGI app to call.
27 config: An instance of :class:`CORSConfig <litestar.config.cors.CORSConfig>`
28 """
29 super().__init__(app=app, scopes={ScopeType.HTTP})
30 self.config = config
31
32 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
33 """ASGI callable.
34
35 Args:
36 scope: The ASGI connection scope.
37 receive: The ASGI receive function.
38 send: The ASGI send function.
39
40 Returns:
41 None
42 """
43 headers = Headers.from_scope(scope=scope)
44 if origin := headers.get("origin"):
45 await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers))
46 else:
47 await self.app(scope, receive, send)
48
49 def send_wrapper(self, send: Send, origin: str, has_cookie: bool) -> Send:
50 """Wrap ``send`` to ensure that state is not disconnected.
51
52 Args:
53 has_cookie: Boolean flag dictating if the connection has a cookie set.
54 origin: The value of the ``Origin`` header.
55 send: The ASGI send function.
56
57 Returns:
58 An ASGI send function.
59 """
60
61 async def wrapped_send(message: Message) -> None:
62 if message["type"] == "http.response.start":
63 message.setdefault("headers", [])
64 headers = MutableScopeHeaders.from_message(message=message)
65 headers.update(self.config.simple_headers)
66
67 if (self.config.is_allow_all_origins and has_cookie) or (
68 not self.config.is_allow_all_origins and self.config.is_origin_allowed(origin=origin)
69 ):
70 headers["Access-Control-Allow-Origin"] = origin
71 headers["Vary"] = "Origin"
72
73 await send(message)
74
75 return wrapped_send
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/middleware/cors.py b/litestar/middleware/cors.py
--- a/litestar/middleware/cors.py
+++ b/litestar/middleware/cors.py
@@ -70,6 +70,15 @@
headers["Access-Control-Allow-Origin"] = origin
headers["Vary"] = "Origin"
+ # We don't want to overwrite this for preflight requests.
+ allow_headers = headers.get("Access-Control-Allow-Headers")
+ if not allow_headers and self.config.allow_headers:
+ headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
+
+ allow_methods = headers.get("Access-Control-Allow-Methods")
+ if not allow_methods and self.config.allow_methods:
+ headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
+
await send(message)
return wrapped_send
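The guarded fill-in logic above is easy to sanity-check in isolation. Below is a hedged sketch with a plain dict standing in for the response headers; the config values are invented for illustration, while litestar's real objects are `MutableScopeHeaders` and `CORSConfig`:

```python
headers = {"Access-Control-Allow-Origin": "https://example.org"}
allow_headers, allow_methods = ["x-api-key", "content-type"], ["GET", "POST"]

# Mirror of the patch: only fill in the headers when a preflight handler
# has not already written them.
if not headers.get("Access-Control-Allow-Headers") and allow_headers:
    headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(allow_headers)))
if not headers.get("Access-Control-Allow-Methods") and allow_methods:
    headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(allow_methods)))

assert headers["Access-Control-Allow-Headers"] == "content-type, x-api-key"
assert headers["Access-Control-Allow-Methods"] == "GET, POST"
```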
| {"golden_diff": "diff --git a/litestar/middleware/cors.py b/litestar/middleware/cors.py\n--- a/litestar/middleware/cors.py\n+++ b/litestar/middleware/cors.py\n@@ -70,6 +70,15 @@\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers[\"Vary\"] = \"Origin\"\n \n+ # We don't want to overwrite this for preflight requests.\n+ allow_headers = headers.get(\"Access-Control-Allow-Headers\")\n+ if not allow_headers and self.config.allow_headers:\n+ headers[\"Access-Control-Allow-Headers\"] = \", \".join(sorted(set(self.config.allow_headers)))\n+\n+ allow_methods = headers.get(\"Access-Control-Allow-Methods\")\n+ if not allow_methods and self.config.allow_methods:\n+ headers[\"Access-Control-Allow-Methods\"] = \", \".join(sorted(set(self.config.allow_methods)))\n+\n await send(message)\n \n return wrapped_send\n", "issue": "Bug: CORS Middleware not setting all headers as per spec\n### Description\r\n\r\nRight now, there's only a handful of headers that are only being set for the preflight request. They must be set for both the preflight and actual request. \r\nhttps://fetch.spec.whatwg.org/#http-responses\r\n\r\nOnly `Access-Control-Allow-Origin` is being set here.\r\nhttps://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/middleware/cors.py#L61-L73\r\n\r\nOnly `Access-Control-Allow-Credentials` and `Access-Control-Expose-Headers` get set here, and this is what the above code uses to update headers\r\nhttps://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/config/cors.py#L123-L136\r\n\r\nThis still doesn't account for:\r\n- Access-Control-Allow-Methods\r\n- Access-Control-Allow-Headers\r\n\r\nwhich are only set on preflight, but should also be set to the actual request.\r\n\r\n### Litestar Version\r\n\r\n2.2.1\r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3178\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3178/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3178/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar.datastructures import Headers, MutableScopeHeaders\nfrom litestar.enums import ScopeType\nfrom litestar.middleware.base import AbstractMiddleware\n\n__all__ = (\"CORSMiddleware\",)\n\n\nif TYPE_CHECKING:\n from litestar.config.cors import CORSConfig\n from litestar.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass CORSMiddleware(AbstractMiddleware):\n \"\"\"CORS 
Middleware.\"\"\"\n\n __slots__ = (\"config\",)\n\n def __init__(self, app: ASGIApp, config: CORSConfig) -> None:\n \"\"\"Middleware that adds CORS validation to the application.\n\n Args:\n app: The ``next`` ASGI app to call.\n config: An instance of :class:`CORSConfig <litestar.config.cors.CORSConfig>`\n \"\"\"\n super().__init__(app=app, scopes={ScopeType.HTTP})\n self.config = config\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n headers = Headers.from_scope(scope=scope)\n if origin := headers.get(\"origin\"):\n await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie=\"cookie\" in headers))\n else:\n await self.app(scope, receive, send)\n\n def send_wrapper(self, send: Send, origin: str, has_cookie: bool) -> Send:\n \"\"\"Wrap ``send`` to ensure that state is not disconnected.\n\n Args:\n has_cookie: Boolean flag dictating if the connection has a cookie set.\n origin: The value of the ``Origin`` header.\n send: The ASGI send function.\n\n Returns:\n An ASGI send function.\n \"\"\"\n\n async def wrapped_send(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n message.setdefault(\"headers\", [])\n headers = MutableScopeHeaders.from_message(message=message)\n headers.update(self.config.simple_headers)\n\n if (self.config.is_allow_all_origins and has_cookie) or (\n not self.config.is_allow_all_origins and self.config.is_origin_allowed(origin=origin)\n ):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers[\"Vary\"] = \"Origin\"\n\n await send(message)\n\n return wrapped_send\n", "path": "litestar/middleware/cors.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar.datastructures import Headers, MutableScopeHeaders\nfrom litestar.enums import ScopeType\nfrom litestar.middleware.base import AbstractMiddleware\n\n__all__ = (\"CORSMiddleware\",)\n\n\nif TYPE_CHECKING:\n from litestar.config.cors import CORSConfig\n from litestar.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass CORSMiddleware(AbstractMiddleware):\n \"\"\"CORS Middleware.\"\"\"\n\n __slots__ = (\"config\",)\n\n def __init__(self, app: ASGIApp, config: CORSConfig) -> None:\n \"\"\"Middleware that adds CORS validation to the application.\n\n Args:\n app: The ``next`` ASGI app to call.\n config: An instance of :class:`CORSConfig <litestar.config.cors.CORSConfig>`\n \"\"\"\n super().__init__(app=app, scopes={ScopeType.HTTP})\n self.config = config\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n headers = Headers.from_scope(scope=scope)\n if origin := headers.get(\"origin\"):\n await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie=\"cookie\" in headers))\n else:\n await self.app(scope, receive, send)\n\n def send_wrapper(self, send: Send, origin: str, has_cookie: bool) -> Send:\n \"\"\"Wrap ``send`` to ensure that state is not disconnected.\n\n Args:\n has_cookie: Boolean flag dictating if the connection has a cookie set.\n origin: The value of the ``Origin`` header.\n send: The ASGI send function.\n\n Returns:\n An ASGI send function.\n \"\"\"\n\n 
async def wrapped_send(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n message.setdefault(\"headers\", [])\n headers = MutableScopeHeaders.from_message(message=message)\n headers.update(self.config.simple_headers)\n\n if (self.config.is_allow_all_origins and has_cookie) or (\n not self.config.is_allow_all_origins and self.config.is_origin_allowed(origin=origin)\n ):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers[\"Vary\"] = \"Origin\"\n\n # We don't want to overwrite this for preflight requests.\n allow_headers = headers.get(\"Access-Control-Allow-Headers\")\n if not allow_headers and self.config.allow_headers:\n headers[\"Access-Control-Allow-Headers\"] = \", \".join(sorted(set(self.config.allow_headers)))\n\n allow_methods = headers.get(\"Access-Control-Allow-Methods\")\n if not allow_methods and self.config.allow_methods:\n headers[\"Access-Control-Allow-Methods\"] = \", \".join(sorted(set(self.config.allow_methods)))\n\n await send(message)\n\n return wrapped_send\n", "path": "litestar/middleware/cors.py"}]} | 1,558 | 197 |
gh_patches_debug_30165 | rasdani/github-patches | git_diff | pytorch__ignite-281 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] More general metrics
I find the metrics to be a bit limited as one might want to pass additional options (even tensors) to the loss.
For instance in recurrent models with different sequence lengths, one would use a mask to avoid counting errors on padded time steps.
The mask is necessary in the loss to know which outputs to use in the final averaging/loss.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/loss.py`
Content:
```
1 from __future__ import division
2
3 from ignite.exceptions import NotComputableError
4 from ignite.metrics.metric import Metric
5
6
7 class Loss(Metric):
8 """
9 Calculates the average loss according to the passed loss_fn.
10
11 - `loss_fn` must return the average loss over all observations in the batch.
12 - `update` must receive output of the form `(y_pred, y)`.
13 """
14 def __init__(self, loss_fn, output_transform=lambda x: x):
15 super(Loss, self).__init__(output_transform)
16 self._loss_fn = loss_fn
17
18 def reset(self):
19 self._sum = 0
20 self._num_examples = 0
21
22 def update(self, output):
23 y_pred, y = output
24 average_loss = self._loss_fn(y_pred, y)
25 assert len(average_loss.shape) == 0, '`loss_fn` did not return the average loss'
26 self._sum += average_loss.item() * y.shape[0]
27 self._num_examples += y.shape[0]
28
29 def compute(self):
30 if self._num_examples == 0:
31 raise NotComputableError(
32 'Loss must have at least one example before it can be computed')
33 return self._sum / self._num_examples
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -8,9 +8,21 @@
"""
Calculates the average loss according to the passed loss_fn.
- - `loss_fn` must return the average loss over all observations in the batch.
- - `update` must receive output of the form `(y_pred, y)`.
+ Args:
+ loss_fn (callable): a callable taking a prediction tensor, a target
+ tensor, optionally other arguments, and returns the average loss
+ over all observations in the batch.
+ output_transform (callable): a callable that is used to transform the
+ :class:`ignite.engine.Engine`'s `process_function`'s output into the
+ form expected by the metric.
+ This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ The output is is expected to be a tuple (prediction, target) or
+ (prediction, target, kwargs) where kwargs is a dictionary of extra
+ keywords arguments.
+
"""
+
def __init__(self, loss_fn, output_transform=lambda x: x):
super(Loss, self).__init__(output_transform)
self._loss_fn = loss_fn
@@ -20,8 +32,12 @@
self._num_examples = 0
def update(self, output):
- y_pred, y = output
- average_loss = self._loss_fn(y_pred, y)
+ if len(output) == 2:
+ y_pred, y = output
+ kwargs = {}
+ else:
+ y_pred, y, kwargs = output
+ average_loss = self._loss_fn(y_pred, y, **kwargs)
assert len(average_loss.shape) == 0, '`loss_fn` did not return the average loss'
self._sum += average_loss.item() * y.shape[0]
self._num_examples += y.shape[0]
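Usage-wise, the extended `update()` lets a third tuple element carry extra keyword arguments (such as a padding mask) through to `loss_fn`. A small sketch follows; the masked loss below is an invented example for illustration, not ignite code:

```python
import torch

def masked_mse(y_pred, y, mask=None):
    err = (y_pred - y) ** 2
    if mask is not None:
        return (err * mask).sum() / mask.sum()  # average over real steps only
    return err.mean()

y_pred = torch.tensor([1.0, 2.0, 0.0])
y      = torch.tensor([1.0, 2.0, 9.0])
mask   = torch.tensor([1.0, 1.0, 0.0])   # last step is padding
output = (y_pred, y, {"mask": mask})     # the 3-tuple form update() accepts
# Loss(masked_mse).update(output) now averages only the unmasked steps.
```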
| {"golden_diff": "diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py\n--- a/ignite/metrics/loss.py\n+++ b/ignite/metrics/loss.py\n@@ -8,9 +8,21 @@\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n \n- - `loss_fn` must return the average loss over all observations in the batch.\n- - `update` must receive output of the form `(y_pred, y)`.\n+ Args:\n+ loss_fn (callable): a callable taking a prediction tensor, a target\n+ tensor, optionally other arguments, and returns the average loss\n+ over all observations in the batch.\n+ output_transform (callable): a callable that is used to transform the\n+ :class:`ignite.engine.Engine`'s `process_function`'s output into the\n+ form expected by the metric.\n+ This can be useful if, for example, you have a multi-output model and\n+ you want to compute the metric with respect to one of the outputs.\n+ The output is is expected to be a tuple (prediction, target) or\n+ (prediction, target, kwargs) where kwargs is a dictionary of extra\n+ keywords arguments.\n+\n \"\"\"\n+\n def __init__(self, loss_fn, output_transform=lambda x: x):\n super(Loss, self).__init__(output_transform)\n self._loss_fn = loss_fn\n@@ -20,8 +32,12 @@\n self._num_examples = 0\n \n def update(self, output):\n- y_pred, y = output\n- average_loss = self._loss_fn(y_pred, y)\n+ if len(output) == 2:\n+ y_pred, y = output\n+ kwargs = {}\n+ else:\n+ y_pred, y, kwargs = output\n+ average_loss = self._loss_fn(y_pred, y, **kwargs)\n assert len(average_loss.shape) == 0, '`loss_fn` did not return the average loss'\n self._sum += average_loss.item() * y.shape[0]\n self._num_examples += y.shape[0]\n", "issue": "[Feature Request] More general metrics\nI find the metrics to be a bit limited as one might want to pass additional options (even tensors) to the loss.\r\nFor instance in recurrent models with different sequence lengths, one would use a mask to avoid counting errors on padded time steps.\r\nThe mask is necessary in the loss to know which outputs to use in the final averaging/ loss.\n[Feature Request] More general metrics\nI find the metrics to be a bit limited as one might want to pass additional options (even tensors) to the loss.\r\nFor instance in recurrent models with different sequence lengths, one would use a mask to avoid counting errors on padded time steps.\r\nThe mask is necessary in the loss to know which outputs to use in the final averaging/ loss.\n", "before_files": [{"content": "from __future__ import division\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n - `loss_fn` must return the average loss over all observations in the batch.\n - `update` must receive output of the form `(y_pred, y)`.\n \"\"\"\n def __init__(self, loss_fn, output_transform=lambda x: x):\n super(Loss, self).__init__(output_transform)\n self._loss_fn = loss_fn\n\n def reset(self):\n self._sum = 0\n self._num_examples = 0\n\n def update(self, output):\n y_pred, y = output\n average_loss = self._loss_fn(y_pred, y)\n assert len(average_loss.shape) == 0, '`loss_fn` did not return the average loss'\n self._sum += average_loss.item() * y.shape[0]\n self._num_examples += y.shape[0]\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\n 'Loss must have at least one example before it can be computed')\n return self._sum / self._num_examples\n", "path": "ignite/metrics/loss.py"}], "after_files": [{"content": 
"from __future__ import division\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn (callable): a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform (callable): a callable that is used to transform the\n :class:`ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is is expected to be a tuple (prediction, target) or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments.\n\n \"\"\"\n\n def __init__(self, loss_fn, output_transform=lambda x: x):\n super(Loss, self).__init__(output_transform)\n self._loss_fn = loss_fn\n\n def reset(self):\n self._sum = 0\n self._num_examples = 0\n\n def update(self, output):\n if len(output) == 2:\n y_pred, y = output\n kwargs = {}\n else:\n y_pred, y, kwargs = output\n average_loss = self._loss_fn(y_pred, y, **kwargs)\n assert len(average_loss.shape) == 0, '`loss_fn` did not return the average loss'\n self._sum += average_loss.item() * y.shape[0]\n self._num_examples += y.shape[0]\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\n 'Loss must have at least one example before it can be computed')\n return self._sum / self._num_examples\n", "path": "ignite/metrics/loss.py"}]} | 745 | 467 |
gh_patches_debug_12175 | rasdani/github-patches | git_diff | liqd__a4-opin-529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wording change for accept page
The page for accepting invites to private projects currently is a bit too straightforward. ;) Let’s add some information.
The headline should be changed to: Do you want to join “<project name>”?
Then there should be another line underneath the headline set in our standard paragraph text style:
You were invited by the initiator of the project. If you accept you will be able to participate in the project. If you decline the invitation, you can also ask for membership at a later time.
The English label for the reject button should be changed to “decline”
The reject button looks strange. I think the button should be styled as a regular red button. Or is the small font-size on purpose?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/projects/rules.py`
Content:
```
1 import rules
2 from rules.predicates import is_superuser
3
4 from euth.organisations.predicates import is_initiator
5
6 from .predicates import is_live, is_member, is_public
7
8 rules.add_perm('euth_projects.edit_project',
9 is_superuser | is_initiator)
10
11
12 rules.add_perm('projects.view_project',
13 is_superuser | is_initiator |
14 ((is_public | is_member) & is_live))
15
```
Path: `euth/projects/views.py`
Content:
```
1 from django.shortcuts import redirect
2 from django.views import generic
3 from rules.contrib import views as rules_views
4
5 from . import mixins, models
6
7
8 class ProjectDetailView(rules_views.PermissionRequiredMixin,
9 mixins.PhaseDispatchMixin,
10 generic.DetailView):
11
12 model = models.Project
13 permission_required = 'projects.view_project'
14
15 @property
16 def raise_exception(self):
17 return self.request.user.is_authenticated()
18
19 def handle_no_permission(self):
20 """
21         Check if user could join
22 """
23 membership_impossible = (
24 not self.request.user.is_authenticated()
25 or self.project.is_draft
26 or self.project.has_member(self.request.user)
27 )
28
29 if membership_impossible:
30 return super().handle_no_permission()
31 else:
32 return self._redirect_membership_request()
33
34 def _redirect_membership_request(self):
35 return redirect('memberships-request',
36 project_slug=self.project.slug)
37
38 @property
39 def project(self):
40 """
41 Emulate ProjectMixin interface for template sharing.
42 """
43 return self.get_object()
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/projects/rules.py b/euth/projects/rules.py
--- a/euth/projects/rules.py
+++ b/euth/projects/rules.py
@@ -9,6 +9,6 @@
is_superuser | is_initiator)
-rules.add_perm('projects.view_project',
+rules.add_perm('euth_projects.view_project',
is_superuser | is_initiator |
((is_public | is_member) & is_live))
diff --git a/euth/projects/views.py b/euth/projects/views.py
--- a/euth/projects/views.py
+++ b/euth/projects/views.py
@@ -10,7 +10,7 @@
generic.DetailView):
model = models.Project
- permission_required = 'projects.view_project'
+ permission_required = 'euth_projects.view_project'
@property
def raise_exception(self):
| {"golden_diff": "diff --git a/euth/projects/rules.py b/euth/projects/rules.py\n--- a/euth/projects/rules.py\n+++ b/euth/projects/rules.py\n@@ -9,6 +9,6 @@\n is_superuser | is_initiator)\n \n \n-rules.add_perm('projects.view_project',\n+rules.add_perm('euth_projects.view_project',\n is_superuser | is_initiator |\n ((is_public | is_member) & is_live))\ndiff --git a/euth/projects/views.py b/euth/projects/views.py\n--- a/euth/projects/views.py\n+++ b/euth/projects/views.py\n@@ -10,7 +10,7 @@\n generic.DetailView):\n \n model = models.Project\n- permission_required = 'projects.view_project'\n+ permission_required = 'euth_projects.view_project'\n \n @property\n def raise_exception(self):\n", "issue": "Wording change for accept page\nThe page for accepting invites to private projects currently is a bit too straight-forward. ;) Let\u2019s add some information\r\n\r\nThe headline should be changed to: Do you want to join \u201c<project name>\u201d ?\r\nThen there should be another line underneath the headline set in our standard paragraph text style:\r\nYou were invited by the initiator of the project. If you accept you will be able to participate in the project. If you decline the invitation, you can also ask for membership at a later time.\r\n\r\nThe English label for the reject button should be changed to \u201cdecline\u201d\r\nThe reject button looks strange. I think the button should be styled as a regular red button. Or is the small font-size on purpose?\r\n\r\n\r\n\n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom euth.organisations.predicates import is_initiator\n\nfrom .predicates import is_live, is_member, is_public\n\nrules.add_perm('euth_projects.edit_project',\n is_superuser | is_initiator)\n\n\nrules.add_perm('projects.view_project',\n is_superuser | is_initiator |\n ((is_public | is_member) & is_live))\n", "path": "euth/projects/rules.py"}, {"content": "from django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.contrib import views as rules_views\n\nfrom . 
import mixins, models\n\n\nclass ProjectDetailView(rules_views.PermissionRequiredMixin,\n mixins.PhaseDispatchMixin,\n generic.DetailView):\n\n model = models.Project\n permission_required = 'projects.view_project'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return self._redirect_membership_request()\n\n def _redirect_membership_request(self):\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n @property\n def project(self):\n \"\"\"\n Emulate ProjectMixin interface for template sharing.\n \"\"\"\n return self.get_object()\n", "path": "euth/projects/views.py"}], "after_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom euth.organisations.predicates import is_initiator\n\nfrom .predicates import is_live, is_member, is_public\n\nrules.add_perm('euth_projects.edit_project',\n is_superuser | is_initiator)\n\n\nrules.add_perm('euth_projects.view_project',\n is_superuser | is_initiator |\n ((is_public | is_member) & is_live))\n", "path": "euth/projects/rules.py"}, {"content": "from django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.contrib import views as rules_views\n\nfrom . import mixins, models\n\n\nclass ProjectDetailView(rules_views.PermissionRequiredMixin,\n mixins.PhaseDispatchMixin,\n generic.DetailView):\n\n model = models.Project\n permission_required = 'euth_projects.view_project'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return self._redirect_membership_request()\n\n def _redirect_membership_request(self):\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n @property\n def project(self):\n \"\"\"\n Emulate ProjectMixin interface for template sharing.\n \"\"\"\n return self.get_object()\n", "path": "euth/projects/views.py"}]} | 922 | 180 |
gh_patches_debug_1300 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better check for codec names
Currently, the codec name argument is not checked; a typo would result in the worker misinterpreting encoded data.
--- END ISSUE ---
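For illustration, `argparse` can reject an unknown codec name at parse time via `choices`; a minimal standalone sketch (only the flag name is taken from the project, the rest is assumed):

```python
# Sketch: argparse fails fast on a mistyped codec name instead of letting
# the worker try to decode data with the wrong codec.
import argparse

parser = argparse.ArgumentParser(description="codec-check sketch")
parser.add_argument(
    "--codec-type",
    default=None,
    choices=["tf_example"],  # any other value exits with "invalid choice"
    help="Type of codec (tf_example or None)",
)

args = parser.parse_args(["--codec-type", "tf_example"])  # accepted
# parser.parse_args(["--codec-type", "tf_exmaple"])       # would exit(2)
```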
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/master/main.py`
Content:
```
1 import logging
2 import time
3 import argparse
4 import os
5
6 import grpc
7 import tensorflow as tf
8
9 tf.enable_eager_execution()
10
11 from concurrent import futures
12 from recordio import File
13 from elasticdl.proto import master_pb2_grpc
14 from elasticdl.master.servicer import MasterServicer
15 from elasticdl.master.task_queue import _TaskQueue
16 from elasticdl.master.k8s_worker_manager import WorkerManager
17 from elasticdl.common.model_helper import load_user_model, build_model
18
19
20 def _make_task_queue(data_dir, record_per_task, num_epoch):
21 f_records = {}
22 for f in os.listdir(data_dir):
23 p = os.path.join(data_dir, f)
24 with File(p, "r") as rio:
25 f_records[p] = rio.count()
26 return _TaskQueue(f_records, record_per_task, num_epoch)
27
28
29 def _parse_args():
30 parser = argparse.ArgumentParser(description="ElasticDL Master")
31 parser.add_argument(
32 "--model_file",
33 help="Full file path of user defined neural model",
34 required=True,
35 )
36 parser.add_argument(
37 "--train_data_dir",
38 help="Training data directory. Files should be in RecordIO format",
39 required=True,
40 )
41 parser.add_argument("--record_per_task", type=int, required=True)
42 parser.add_argument("--num_epoch", type=int, required=True)
43 parser.add_argument(
44 "--grads_to_wait",
45 type=int,
46 help="Number of gradients to wait before updating model",
47 required=True,
48 )
49 parser.add_argument(
50 "--minibatch_size",
51 type=int,
52 help="Minibatch size used by workers to compute gradients",
53 required=True,
54 )
55 parser.add_argument(
56 "--num_worker",
57 type=int,
58 help="the number of workers used in training",
59 default=0,
60 )
61 parser.add_argument(
62 "--worker_image", help="docker image for worker", default=None
63 )
64 parser.add_argument("--job_name", help="job name", required=True)
65 parser.add_argument(
66 "--codec-type",
67 default=None,
68 help="Type of codec(tf_example or None)",
69 )
70 return parser.parse_args()
71
72
73 def main():
74 # TODO: pass port via flags.
75 PORT = 50001
76 logger = logging.getLogger("master")
77 args = _parse_args()
78 task_q = _make_task_queue(
79 args.train_data_dir, args.record_per_task, args.num_epoch
80 )
81 model_module = load_user_model(args.model_file)
82 model_inst = model_module.model
83 build_model(model_inst, model_module.feature_columns())
84 optimizer = model_module.optimizer()
85
86 server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))
87 master_pb2_grpc.add_MasterServicer_to_server(
88 MasterServicer(
89 logger,
90 args.grads_to_wait,
91 args.minibatch_size,
92 optimizer,
93 task_q,
94 init_var=model_inst.trainable_variables,
95 ),
96 server,
97 )
98 server.add_insecure_port("[::]:{}".format(PORT))
99 server.start()
100 logger.warning("Server started at port: %d", PORT)
101
102 if args.num_worker:
103 master_addr = "%s:%d" % (os.getenv("MY_POD_IP", "localhost"), PORT)
104 worker_command = ["python"]
105 worker_args = [
106 "-m",
107 "elasticdl.worker.main",
108 "--model_file",
109 args.model_file,
110 "--master_addr",
111 master_addr,
112 "--codec-type",
113 args.codec_type
114 ]
115
116 worker_manager = WorkerManager(
117 job_name=args.job_name,
118 worker_image=args.worker_image,
119 command=worker_command,
120 args=worker_args,
121 namespace="default",
122 num_worker=args.num_worker,
123 )
124 worker_manager.start_workers(restart_policy="Never")
125
126 try:
127 while True:
128 if task_q.finished():
129 break
130 time.sleep(30)
131 except KeyboardInterrupt:
132 logger.warning("Server stopping")
133
134 if args.num_worker:
135 # TODO: worker_manager.remove_workers supports synchronized call
136 worker_manager.remove_workers()
137 # wait for worker pod to be deleted
138 max_check_num = 10
139 for _ in range(max_check_num):
140 time.sleep(3)
141 counters = worker_manager.get_counters()
142 if not counters:
143 break
144 server.stop(0)
145
146
147 if __name__ == "__main__":
148 logging.basicConfig()
149 main()
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/master/main.py b/elasticdl/master/main.py
--- a/elasticdl/master/main.py
+++ b/elasticdl/master/main.py
@@ -65,6 +65,7 @@
parser.add_argument(
"--codec-type",
default=None,
+ choices=["tf_example"],
help="Type of codec(tf_example or None)",
)
return parser.parse_args()
| {"golden_diff": "diff --git a/elasticdl/master/main.py b/elasticdl/master/main.py\n--- a/elasticdl/master/main.py\n+++ b/elasticdl/master/main.py\n@@ -65,6 +65,7 @@\n parser.add_argument(\n \"--codec-type\",\n default=None,\n+ choices=[\"tf_example\"],\n help=\"Type of codec(tf_example or None)\",\n )\n return parser.parse_args()\n", "issue": "Better check for codec names\ncurrently, codec name argument is not checked. A typo would result in worker interpreting encoded data.\n", "before_files": [{"content": "import logging\nimport time\nimport argparse\nimport os\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom concurrent import futures\nfrom recordio import File\nfrom elasticdl.proto import master_pb2_grpc\nfrom elasticdl.master.servicer import MasterServicer\nfrom elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with File(p, \"r\") as rio:\n f_records[p] = rio.count()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec-type\",\n default=None,\n help=\"Type of codec(tf_example or None)\",\n )\n return parser.parse_args()\n\n\ndef main():\n # TODO: pass port via flags.\n PORT = 50001\n logger = logging.getLogger(\"master\")\n args = _parse_args()\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n master_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n logger,\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.warning(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec-type\",\n 
args.codec_type\n ]\n\n worker_manager = WorkerManager(\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n )\n worker_manager.start_workers(restart_policy=\"Never\")\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n if args.num_worker:\n # TODO: worker_manager.remove_workers supports synchronized call\n worker_manager.remove_workers()\n # wait for worker pod to be deleted\n max_check_num = 10\n for _ in range(max_check_num):\n time.sleep(3)\n counters = worker_manager.get_counters()\n if not counters:\n break\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n main()\n", "path": "elasticdl/master/main.py"}], "after_files": [{"content": "import logging\nimport time\nimport argparse\nimport os\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom concurrent import futures\nfrom recordio import File\nfrom elasticdl.proto import master_pb2_grpc\nfrom elasticdl.master.servicer import MasterServicer\nfrom elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with File(p, \"r\") as rio:\n f_records[p] = rio.count()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. 
Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec-type\",\n default=None,\n choices=[\"tf_example\"],\n help=\"Type of codec(tf_example or None)\",\n )\n return parser.parse_args()\n\n\ndef main():\n # TODO: pass port via flags.\n PORT = 50001\n logger = logging.getLogger(\"master\")\n args = _parse_args()\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n master_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n logger,\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.warning(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec-type\",\n args.codec_type\n ]\n\n worker_manager = WorkerManager(\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n )\n worker_manager.start_workers(restart_policy=\"Never\")\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n if args.num_worker:\n # TODO: worker_manager.remove_workers supports synchronized call\n worker_manager.remove_workers()\n # wait for worker pod to be deleted\n max_check_num = 10\n for _ in range(max_check_num):\n time.sleep(3)\n counters = worker_manager.get_counters()\n if not counters:\n break\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n main()\n", "path": "elasticdl/master/main.py"}]} | 1,569 | 88 |
gh_patches_debug_17319 | rasdani/github-patches | git_diff | elastic__elasticsearch-py-210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for custom authentication objects for requests module
Hi,
Several transport classes are available; one of them is "requests".
Requests supports basic authentication, but also far more than that ([0](http://docs.python-requests.org/en/latest/user/advanced/#custom-authentication)). In order to support this, a few lines would need to be changed to allow providing an authentication object ([1](https://github.com/elastic/elasticsearch-py/compare/master...sim0nx:requests_custom_authentication)).
I have the code ready ([1](https://github.com/elastic/elasticsearch-py/compare/master...sim0nx:requests_custom_authentication)) for this and am actively using it.
Would you be willing to accept this contribution?
--- END ISSUE ---
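For illustration, a custom authentication object in requests is any callable derived from `requests.auth.AuthBase`; wiring one into a session could look roughly like this (sketch only; the token header name is an assumed example):

```python
# Sketch of a requests custom auth object (see the requests docs on AuthBase).
import requests

class TokenAuth(requests.auth.AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        # Invoked once per request; mutate and return the PreparedRequest.
        r.headers["X-Auth-Token"] = self.token
        return r

session = requests.session()
session.auth = TokenAuth("s3cr3t")  # instead of a (user, password) tuple
```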
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticsearch/connection/http_requests.py`
Content:
```
1 import time
2 import warnings
3 try:
4 import requests
5 REQUESTS_AVAILABLE = True
6 except ImportError:
7 REQUESTS_AVAILABLE = False
8
9 from .base import Connection
10 from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError
11 from ..compat import urlencode
12
13 class RequestsHttpConnection(Connection):
14 """
15 Connection using the `requests` library.
16
17 :arg http_auth: optional http auth information as either ':' separated
18 string or a tuple
19 :arg use_ssl: use ssl for the connection if `True`
20 :arg verify_certs: whether to verify SSL certificates
21 :arg ca_certs: optional path to CA bundle. By default standard requests'
22 bundle will be used.
23 :arg client_cert: path to the file containing the private key and the
24 certificate
25 """
26 def __init__(self, host='localhost', port=9200, http_auth=None,
27 use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None,
28 **kwargs):
29 if not REQUESTS_AVAILABLE:
30 raise ImproperlyConfigured("Please install requests to use RequestsHttpConnection.")
31
32 super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs)
33 self.session = requests.session()
34 if http_auth is not None:
35 if not isinstance(http_auth, (tuple, list)):
36 http_auth = http_auth.split(':', 1)
37 http_auth = tuple(http_auth)
38 self.session.auth = http_auth
39 self.base_url = 'http%s://%s:%d%s' % (
40 's' if use_ssl else '',
41 host, port, self.url_prefix
42 )
43 self.session.verify = verify_certs
44 self.session.cert = client_cert
45 if ca_certs:
46 if not verify_certs:
47 raise ImproperlyConfigured("You cannot pass CA certificates when verify SSL is off.")
48 self.session.verify = ca_certs
49
50 if use_ssl and not verify_certs:
51 warnings.warn(
52 'Connecting to %s using SSL with verify_certs=False is insecure.' % self.base_url)
53
54 def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
55 url = self.base_url + url
56 if params:
57 url = '%s?%s' % (url, urlencode(params or {}))
58
59 start = time.time()
60 try:
61 response = self.session.request(method, url, data=body, timeout=timeout or self.timeout)
62 duration = time.time() - start
63 raw_data = response.text
64 except requests.exceptions.SSLError as e:
65 self.log_request_fail(method, url, body, time.time() - start, exception=e)
66 raise SSLError('N/A', str(e), e)
67 except requests.Timeout as e:
68 self.log_request_fail(method, url, body, time.time() - start, exception=e)
69 raise ConnectionTimeout('TIMEOUT', str(e), e)
70 except requests.ConnectionError as e:
71 self.log_request_fail(method, url, body, time.time() - start, exception=e)
72 raise ConnectionError('N/A', str(e), e)
73
74 # raise errors based on http status codes, let the client handle those if needed
75 if not (200 <= response.status_code < 300) and response.status_code not in ignore:
76 self.log_request_fail(method, url, body, duration, response.status_code)
77 self._raise_error(response.status_code, raw_data)
78
79 self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration)
80
81 return response.status_code, response.headers, raw_data
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticsearch/connection/http_requests.py b/elasticsearch/connection/http_requests.py
--- a/elasticsearch/connection/http_requests.py
+++ b/elasticsearch/connection/http_requests.py
@@ -1,3 +1,4 @@
+import six
import time
import warnings
try:
@@ -32,8 +33,10 @@
super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs)
self.session = requests.session()
if http_auth is not None:
- if not isinstance(http_auth, (tuple, list)):
- http_auth = http_auth.split(':', 1)
+ if isinstance(http_auth, (tuple, list)):
+ http_auth = tuple(http_auth)
+ elif isinstance(http_auth, six.string_types):
+ http_auth = tuple(http_auth.split(':', 1))
http_auth = tuple(http_auth)
self.session.auth = http_auth
self.base_url = 'http%s://%s:%d%s' % (
| {"golden_diff": "diff --git a/elasticsearch/connection/http_requests.py b/elasticsearch/connection/http_requests.py\n--- a/elasticsearch/connection/http_requests.py\n+++ b/elasticsearch/connection/http_requests.py\n@@ -1,3 +1,4 @@\n+import six\n import time\n import warnings\n try:\n@@ -32,8 +33,10 @@\n super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs)\n self.session = requests.session()\n if http_auth is not None:\n- if not isinstance(http_auth, (tuple, list)):\n- http_auth = http_auth.split(':', 1)\n+ if isinstance(http_auth, (tuple, list)):\n+ http_auth = tuple(http_auth)\n+ elif isinstance(http_auth, six.string_types):\n+ http_auth = tuple(http_auth.split(':', 1))\n http_auth = tuple(http_auth)\n self.session.auth = http_auth\n self.base_url = 'http%s://%s:%d%s' % (\n", "issue": "Support for custom authentication objects for requests module\nHi,\n\nSeveral transport classes are available, one of them is \"requests\".\nRequests supports basic-authentication but far more than that ([0](http://docs.python-requests.org/en/latest/user/advanced/#custom-authentication)). In order to support this a few lines would need to be changed to allow for providing an authentication object ([1](https://github.com/elastic/elasticsearch-py/compare/master...sim0nx:requests_custom_authentication)).\n\nI have the code ready ([1](https://github.com/elastic/elasticsearch-py/compare/master...sim0nx:requests_custom_authentication)) for this and am actively using it.\n\nWould you be willing to accept this contribution ?\n\n", "before_files": [{"content": "import time\nimport warnings\ntry:\n import requests\n REQUESTS_AVAILABLE = True\nexcept ImportError:\n REQUESTS_AVAILABLE = False\n\nfrom .base import Connection\nfrom ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError\nfrom ..compat import urlencode\n\nclass RequestsHttpConnection(Connection):\n \"\"\"\n Connection using the `requests` library.\n\n :arg http_auth: optional http auth information as either ':' separated\n string or a tuple\n :arg use_ssl: use ssl for the connection if `True`\n :arg verify_certs: whether to verify SSL certificates\n :arg ca_certs: optional path to CA bundle. By default standard requests'\n bundle will be used.\n :arg client_cert: path to the file containing the private key and the\n certificate\n \"\"\"\n def __init__(self, host='localhost', port=9200, http_auth=None,\n use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None,\n **kwargs):\n if not REQUESTS_AVAILABLE:\n raise ImproperlyConfigured(\"Please install requests to use RequestsHttpConnection.\")\n\n super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs)\n self.session = requests.session()\n if http_auth is not None:\n if not isinstance(http_auth, (tuple, list)):\n http_auth = http_auth.split(':', 1)\n http_auth = tuple(http_auth)\n self.session.auth = http_auth\n self.base_url = 'http%s://%s:%d%s' % (\n 's' if use_ssl else '',\n host, port, self.url_prefix\n )\n self.session.verify = verify_certs\n self.session.cert = client_cert\n if ca_certs:\n if not verify_certs:\n raise ImproperlyConfigured(\"You cannot pass CA certificates when verify SSL is off.\")\n self.session.verify = ca_certs\n\n if use_ssl and not verify_certs:\n warnings.warn(\n 'Connecting to %s using SSL with verify_certs=False is insecure.' 
% self.base_url)\n\n def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):\n url = self.base_url + url\n if params:\n url = '%s?%s' % (url, urlencode(params or {}))\n\n start = time.time()\n try:\n response = self.session.request(method, url, data=body, timeout=timeout or self.timeout)\n duration = time.time() - start\n raw_data = response.text\n except requests.exceptions.SSLError as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise SSLError('N/A', str(e), e)\n except requests.Timeout as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionTimeout('TIMEOUT', str(e), e)\n except requests.ConnectionError as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionError('N/A', str(e), e)\n\n # raise errors based on http status codes, let the client handle those if needed\n if not (200 <= response.status_code < 300) and response.status_code not in ignore:\n self.log_request_fail(method, url, body, duration, response.status_code)\n self._raise_error(response.status_code, raw_data)\n\n self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration)\n\n return response.status_code, response.headers, raw_data\n", "path": "elasticsearch/connection/http_requests.py"}], "after_files": [{"content": "import six\nimport time\nimport warnings\ntry:\n import requests\n REQUESTS_AVAILABLE = True\nexcept ImportError:\n REQUESTS_AVAILABLE = False\n\nfrom .base import Connection\nfrom ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError\nfrom ..compat import urlencode\n\nclass RequestsHttpConnection(Connection):\n \"\"\"\n Connection using the `requests` library.\n\n :arg http_auth: optional http auth information as either ':' separated\n string or a tuple\n :arg use_ssl: use ssl for the connection if `True`\n :arg verify_certs: whether to verify SSL certificates\n :arg ca_certs: optional path to CA bundle. By default standard requests'\n bundle will be used.\n :arg client_cert: path to the file containing the private key and the\n certificate\n \"\"\"\n def __init__(self, host='localhost', port=9200, http_auth=None,\n use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None,\n **kwargs):\n if not REQUESTS_AVAILABLE:\n raise ImproperlyConfigured(\"Please install requests to use RequestsHttpConnection.\")\n\n super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs)\n self.session = requests.session()\n if http_auth is not None:\n if isinstance(http_auth, (tuple, list)):\n http_auth = tuple(http_auth)\n elif isinstance(http_auth, six.string_types):\n http_auth = tuple(http_auth.split(':', 1))\n http_auth = tuple(http_auth)\n self.session.auth = http_auth\n self.base_url = 'http%s://%s:%d%s' % (\n 's' if use_ssl else '',\n host, port, self.url_prefix\n )\n self.session.verify = verify_certs\n self.session.cert = client_cert\n if ca_certs:\n if not verify_certs:\n raise ImproperlyConfigured(\"You cannot pass CA certificates when verify SSL is off.\")\n self.session.verify = ca_certs\n\n if use_ssl and not verify_certs:\n warnings.warn(\n 'Connecting to %s using SSL with verify_certs=False is insecure.' 
% self.base_url)\n\n def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):\n url = self.base_url + url\n if params:\n url = '%s?%s' % (url, urlencode(params or {}))\n\n start = time.time()\n try:\n response = self.session.request(method, url, data=body, timeout=timeout or self.timeout)\n duration = time.time() - start\n raw_data = response.text\n except requests.exceptions.SSLError as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise SSLError('N/A', str(e), e)\n except requests.Timeout as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionTimeout('TIMEOUT', str(e), e)\n except requests.ConnectionError as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionError('N/A', str(e), e)\n\n # raise errors based on http status codes, let the client handle those if needed\n if not (200 <= response.status_code < 300) and response.status_code not in ignore:\n self.log_request_fail(method, url, body, duration, response.status_code)\n self._raise_error(response.status_code, raw_data)\n\n self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration)\n\n return response.status_code, response.headers, raw_data\n", "path": "elasticsearch/connection/http_requests.py"}]} | 1,351 | 210 |
gh_patches_debug_37274 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tiffany is broken
During the global build at 2021-05-26-14-42-23, spider **tiffany** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tiffany.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tiffany.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tiffany.geojson))
Tiffany
http://www.tiffany.com/jewelry-stores/store-list/united-states
--- END ISSUE ---
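For illustration, store pages of this kind usually embed their data as JSON-LD; a sketch of pulling store records out of the `application/ld+json` blocks with scrapy selectors (illustrative only; assumes a scrapy `response` object):

```python
# Sketch: filter embedded JSON-LD blocks down to Store records.
import json

def extract_stores(response):
    for ldjson in response.xpath('//script[@type="application/ld+json"]/text()').extract():
        data = json.loads(ldjson)
        if data.get("@type") == "Store":
            yield data["name"], data["address"]["streetAddress"]
```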
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tiffany.py`
Content:
```
1 import scrapy
2 import re
3 import json
4 from locations.items import GeojsonPointItem
5
6 class TiffanySpider(scrapy.Spider):
7
8 name = "tiffany"
9 item_attributes = { 'brand': "Tiffany" }
10 allowed_domains = ["www.tiffany.com"]
11 download_delay = 0.5
12 start_urls = (
13 'http://www.tiffany.com/jewelry-stores/store-list/united-states',
14 )
15
16 def parse_day(self, day):
17 if re.search('-', day):
18 days = day.split('-')
19 osm_days = []
20 if len(days) == 2:
21 for day in days:
22 osm_day = day.strip()[:2]
23 osm_days.append(osm_day)
24 return "-".join(osm_days)
25 return day.strip()[:2]
26
27 def parse_times(self, times):
28 if times.strip() == 'CLOSED':
29 return 'Closed'
30 hours_to = [x.strip() for x in times.split('-')]
31 cleaned_times = []
32 for hour in hours_to:
33 if re.search('PM$', hour):
34 hour = re.sub('PM', '', hour).strip()
35 hour_min = hour.split(":")
36 if int(hour_min[0]) < 12:
37 hour_min[0] = str(12 + int(hour_min[0]))
38 cleaned_times.append(":".join(hour_min))
39
40 if re.search('AM$', hour):
41 hour = re.sub('AM', '', hour).strip()
42 hour_min = hour.split(":")
43 if len(hour_min[0]) <2:
44 hour_min[0] = hour_min[0].zfill(2)
45 else:
46 hour_min[0] = str(int(hour_min[0]))
47
48 cleaned_times.append(":".join(hour_min))
49 return "-".join(cleaned_times)
50
51 def parse_hours(self, lis):
52 hours = []
53 for li in lis:
54 if re.search(r"([0-9]{1,2}):([0-9]{1,2})([APM]{2})|CLOSED" , li):
55 day = li.split(':')[0]
56 times = li.replace(day+':','')
57 if times and day:
58 parsed_time = self.parse_times(times)
59 parsed_day = self.parse_day(day)
60 hours.append(parsed_day + ' ' + parsed_time)
61
62 return "; ".join(hours)
63
64 def parse_stores(self, response):
65 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
66 properties = {
67 'addr_full': data['address']['streetAddress'],
68 'phone': data['telephone'],
69 'name': data['name'],
70 'city': data['address']['addressLocality'],
71 'state': data['address']['addressRegion'],
72 'postcode': data['address']['postalCode'],
73 'ref': data['name'].replace(' ','_'),
74 'website': response.url,
75 'lat': float(data['geo']['latitude']),
76 'lon': float(data['geo']['longitude']),
77 }
78
79 hours = self.parse_hours(response.xpath('//div[@id="divExtendedInfo"]/text()').extract())
80 if hours:
81 properties['opening_hours'] = hours
82 yield GeojsonPointItem(**properties)
83
84 def parse(self, response):
85 urls = response.xpath('//a[contains(text(),"View on Map")]/@href').extract()
86 for path in urls:
87 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/tiffany.py b/locations/spiders/tiffany.py
--- a/locations/spiders/tiffany.py
+++ b/locations/spiders/tiffany.py
@@ -6,11 +6,11 @@
class TiffanySpider(scrapy.Spider):
name = "tiffany"
- item_attributes = { 'brand': "Tiffany" }
+ item_attributes = { 'brand': "Tiffany", 'brand_wikidata': "Q1066858" }
allowed_domains = ["www.tiffany.com"]
download_delay = 0.5
start_urls = (
- 'http://www.tiffany.com/jewelry-stores/store-list/united-states',
+ 'https://www.tiffany.com/jewelry-stores/store-list/',
)
def parse_day(self, day):
@@ -61,27 +61,31 @@
return "; ".join(hours)
- def parse_stores(self, response):
- data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
- properties = {
- 'addr_full': data['address']['streetAddress'],
- 'phone': data['telephone'],
- 'name': data['name'],
- 'city': data['address']['addressLocality'],
- 'state': data['address']['addressRegion'],
- 'postcode': data['address']['postalCode'],
- 'ref': data['name'].replace(' ','_'),
- 'website': response.url,
- 'lat': float(data['geo']['latitude']),
- 'lon': float(data['geo']['longitude']),
- }
+ def parse(self, response):
+ for href in response.xpath('//@href[contains(., "/jewelry-stores/")]').extract():
+ yield scrapy.Request(response.urljoin(href))
- hours = self.parse_hours(response.xpath('//div[@id="divExtendedInfo"]/text()').extract())
- if hours:
- properties['opening_hours'] = hours
- yield GeojsonPointItem(**properties)
+ for ldjson in response.xpath('//script[@type="application/ld+json"]/text()').extract():
+ data = json.loads(ldjson)
+ if data["@type"] != "Store":
+ continue
+
+ properties = {
+ 'name': data['name'],
+ 'phone': data['telephone'],
+ 'addr_full': data['address']['streetAddress'],
+ 'city': data['address']['addressLocality'],
+ 'state': data['address']['addressRegion'],
+ 'postcode': data['address']['postalCode'],
+ 'country': data['address']['addressCountry'],
+ 'ref': data['name'].replace(' ','_'),
+ 'website': response.url,
+ 'lat': response.xpath('//tiffany-maps/@markeratlat').extract_first(),
+ 'lon': response.xpath('//tiffany-maps/@markeratlng').extract_first(),
+ }
+
+ hours = self.parse_hours(response.xpath('//div[@id="divExtendedInfo"]/text()').extract())
+ if hours:
+ properties['opening_hours'] = hours
+ yield GeojsonPointItem(**properties)
- def parse(self, response):
- urls = response.xpath('//a[contains(text(),"View on Map")]/@href').extract()
- for path in urls:
- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
| {"golden_diff": "diff --git a/locations/spiders/tiffany.py b/locations/spiders/tiffany.py\n--- a/locations/spiders/tiffany.py\n+++ b/locations/spiders/tiffany.py\n@@ -6,11 +6,11 @@\n class TiffanySpider(scrapy.Spider):\n \n name = \"tiffany\"\n- item_attributes = { 'brand': \"Tiffany\" }\n+ item_attributes = { 'brand': \"Tiffany\", 'brand_wikidata': \"Q1066858\" }\n allowed_domains = [\"www.tiffany.com\"]\n download_delay = 0.5\n start_urls = (\n- 'http://www.tiffany.com/jewelry-stores/store-list/united-states',\n+ 'https://www.tiffany.com/jewelry-stores/store-list/',\n )\n \n def parse_day(self, day):\n@@ -61,27 +61,31 @@\n \n return \"; \".join(hours)\n \n- def parse_stores(self, response):\n- data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n- properties = {\n- 'addr_full': data['address']['streetAddress'],\n- 'phone': data['telephone'],\n- 'name': data['name'],\n- 'city': data['address']['addressLocality'],\n- 'state': data['address']['addressRegion'],\n- 'postcode': data['address']['postalCode'],\n- 'ref': data['name'].replace(' ','_'),\n- 'website': response.url,\n- 'lat': float(data['geo']['latitude']),\n- 'lon': float(data['geo']['longitude']),\n- }\n+ def parse(self, response):\n+ for href in response.xpath('//@href[contains(., \"/jewelry-stores/\")]').extract():\n+ yield scrapy.Request(response.urljoin(href))\n \n- hours = self.parse_hours(response.xpath('//div[@id=\"divExtendedInfo\"]/text()').extract())\n- if hours:\n- properties['opening_hours'] = hours\n- yield GeojsonPointItem(**properties)\n+ for ldjson in response.xpath('//script[@type=\"application/ld+json\"]/text()').extract():\n+ data = json.loads(ldjson)\n+ if data[\"@type\"] != \"Store\":\n+ continue\n+\n+ properties = {\n+ 'name': data['name'],\n+ 'phone': data['telephone'],\n+ 'addr_full': data['address']['streetAddress'],\n+ 'city': data['address']['addressLocality'],\n+ 'state': data['address']['addressRegion'],\n+ 'postcode': data['address']['postalCode'],\n+ 'country': data['address']['addressCountry'],\n+ 'ref': data['name'].replace(' ','_'),\n+ 'website': response.url,\n+ 'lat': response.xpath('//tiffany-maps/@markeratlat').extract_first(),\n+ 'lon': response.xpath('//tiffany-maps/@markeratlng').extract_first(),\n+ }\n+\n+ hours = self.parse_hours(response.xpath('//div[@id=\"divExtendedInfo\"]/text()').extract())\n+ if hours:\n+ properties['opening_hours'] = hours\n+ yield GeojsonPointItem(**properties)\n \n- def parse(self, response):\n- urls = response.xpath('//a[contains(text(),\"View on Map\")]/@href').extract()\n- for path in urls:\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "issue": "Spider tiffany is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tiffany** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tiffany.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tiffany.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tiffany.geojson))\nTiffany\nhttp://www.tiffany.com/jewelry-stores/store-list/united-states\n", "before_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nclass TiffanySpider(scrapy.Spider):\n\n name = \"tiffany\"\n item_attributes = { 'brand': \"Tiffany\" }\n allowed_domains = [\"www.tiffany.com\"]\n download_delay = 0.5\n start_urls = (\n 
'http://www.tiffany.com/jewelry-stores/store-list/united-states',\n )\n\n def parse_day(self, day):\n if re.search('-', day):\n days = day.split('-')\n osm_days = []\n if len(days) == 2:\n for day in days:\n osm_day = day.strip()[:2]\n osm_days.append(osm_day)\n return \"-\".join(osm_days)\n return day.strip()[:2]\n\n def parse_times(self, times):\n if times.strip() == 'CLOSED':\n return 'Closed'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n for hour in hours_to:\n if re.search('PM$', hour):\n hour = re.sub('PM', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('AM$', hour):\n hour = re.sub('AM', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n if re.search(r\"([0-9]{1,2}):([0-9]{1,2})([APM]{2})|CLOSED\" , li):\n day = li.split(':')[0]\n times = li.replace(day+':','')\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n properties = {\n 'addr_full': data['address']['streetAddress'],\n 'phone': data['telephone'],\n 'name': data['name'],\n 'city': data['address']['addressLocality'],\n 'state': data['address']['addressRegion'],\n 'postcode': data['address']['postalCode'],\n 'ref': data['name'].replace(' ','_'),\n 'website': response.url,\n 'lat': float(data['geo']['latitude']),\n 'lon': float(data['geo']['longitude']),\n }\n\n hours = self.parse_hours(response.xpath('//div[@id=\"divExtendedInfo\"]/text()').extract())\n if hours:\n properties['opening_hours'] = hours\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[contains(text(),\"View on Map\")]/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "path": "locations/spiders/tiffany.py"}], "after_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nclass TiffanySpider(scrapy.Spider):\n\n name = \"tiffany\"\n item_attributes = { 'brand': \"Tiffany\", 'brand_wikidata': \"Q1066858\" }\n allowed_domains = [\"www.tiffany.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://www.tiffany.com/jewelry-stores/store-list/',\n )\n\n def parse_day(self, day):\n if re.search('-', day):\n days = day.split('-')\n osm_days = []\n if len(days) == 2:\n for day in days:\n osm_day = day.strip()[:2]\n osm_days.append(osm_day)\n return \"-\".join(osm_days)\n return day.strip()[:2]\n\n def parse_times(self, times):\n if times.strip() == 'CLOSED':\n return 'Closed'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n for hour in hours_to:\n if re.search('PM$', hour):\n hour = re.sub('PM', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('AM$', hour):\n hour = re.sub('AM', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = 
hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n if re.search(r\"([0-9]{1,2}):([0-9]{1,2})([APM]{2})|CLOSED\" , li):\n day = li.split(':')[0]\n times = li.replace(day+':','')\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n def parse(self, response):\n for href in response.xpath('//@href[contains(., \"/jewelry-stores/\")]').extract():\n yield scrapy.Request(response.urljoin(href))\n\n for ldjson in response.xpath('//script[@type=\"application/ld+json\"]/text()').extract():\n data = json.loads(ldjson)\n if data[\"@type\"] != \"Store\":\n continue\n\n properties = {\n 'name': data['name'],\n 'phone': data['telephone'],\n 'addr_full': data['address']['streetAddress'],\n 'city': data['address']['addressLocality'],\n 'state': data['address']['addressRegion'],\n 'postcode': data['address']['postalCode'],\n 'country': data['address']['addressCountry'],\n 'ref': data['name'].replace(' ','_'),\n 'website': response.url,\n 'lat': response.xpath('//tiffany-maps/@markeratlat').extract_first(),\n 'lon': response.xpath('//tiffany-maps/@markeratlng').extract_first(),\n }\n\n hours = self.parse_hours(response.xpath('//div[@id=\"divExtendedInfo\"]/text()').extract())\n if hours:\n properties['opening_hours'] = hours\n yield GeojsonPointItem(**properties)\n\n", "path": "locations/spiders/tiffany.py"}]} | 1,375 | 756 |
gh_patches_debug_8364 | rasdani/github-patches | git_diff | svthalia__concrexit-1756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PaymentDetailView in the admin API allows unauthorized deletion of payments
https://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/payments/api/v2/admin/views.py#L69
### Describe the bug
Payments should sometimes be undeletable; for example, TPay payments that are in a processed batch. The PaymentAdmin prevents such deletions. However, the rest framework DestroyAPIView does not respect that.
### How to reproduce
Steps to reproduce the behaviour:
1. Have a payment
2. Add it to a batch
3. Process the batch
4. Do the API `DELETE` request at `/api/v2/admin/payments/<id>`
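For example, step 4 could be scripted as follows (illustrative only; host, token, and payment id are placeholders):

```python
# Sketch of the reproduction request; before the fix this deletes the payment.
import requests

resp = requests.delete(
    "https://example.com/api/v2/admin/payments/<id>",
    headers={"Authorization": "Bearer <token-with-payments:admin-scope>"},
)
print(resp.status_code)  # 204 even for a payment in a processed batch
```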
### Expected behaviour
Either disable payment deletion from the API entirely, or manually implement a check that the payment is not in a processed batch.
--- END ISSUE ---
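For illustration, the second option amounts to a small `delete` override on the view; a sketch mirroring the check the admin enforces (not standalone: `Payment`, the serializer, and the Admin* base views come from the module shown below):

```python
# Sketch: refuse API deletion when the payment sits in a processed batch.
from rest_framework.exceptions import PermissionDenied

class PaymentDetailView(AdminRetrieveAPIView, AdminDestroyAPIView):
    queryset = Payment.objects.all()
    serializer_class = PaymentAdminSerializer

    def delete(self, request, *args, **kwargs):
        payment = self.get_object()
        if payment.batch and payment.batch.processed:
            raise PermissionDenied("This payment cannot be deleted.")
        return super().delete(request, *args, **kwargs)
```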
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/payments/api/v2/admin/views.py`
Content:
```
1 import rest_framework.filters as framework_filters
2 from django.apps import apps
3 from django.http import Http404
4 from django.utils.translation import gettext_lazy as _
5 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
6 from rest_framework import status, serializers
7 from rest_framework.exceptions import PermissionDenied, ValidationError
8 from rest_framework.generics import get_object_or_404
9 from rest_framework.permissions import IsAdminUser
10 from rest_framework.response import Response
11 from rest_framework.settings import api_settings
12 from rest_framework.views import APIView
13
14 from payments import services, payables, NotRegistered
15 from payments.api.v2 import filters
16 from payments.api.v2.admin.serializers.payable_create import (
17 PayableCreateAdminSerializer,
18 )
19 from payments.api.v2.admin.serializers.payable_detail import PayableAdminSerializer
20 from payments.api.v2.admin.serializers.payment import (
21 PaymentAdminSerializer,
22 PaymentCreateSerializer,
23 )
24 from payments.exceptions import PaymentError
25 from payments.models import Payment, PaymentUser
26 from thaliawebsite.api.v2.admin import (
27 AdminListAPIView,
28 AdminCreateAPIView,
29 AdminRetrieveAPIView,
30 AdminDestroyAPIView,
31 )
32
33
34 class PaymentListCreateView(AdminListAPIView, AdminCreateAPIView):
35 """View that allows you to create and list payments as admin."""
36
37 queryset = Payment.objects.prefetch_related(
38 "paid_by__profile",
39 "paid_by__membership_set",
40 "processed_by__profile",
41 "processed_by__membership_set",
42 )
43
44 required_scopes = ["payments:admin"]
45 filter_backends = (
46 framework_filters.OrderingFilter,
47 filters.CreatedAtFilter,
48 filters.PaymentTypeFilter,
49 )
50 ordering_fields = ("created_at",)
51
52 def get_serializer_class(self):
53 if self.request.method.lower() == "post":
54 return PaymentCreateSerializer
55 return PaymentAdminSerializer
56
57 def create(self, request, *args, **kwargs):
58 serializer = self.get_serializer(data=request.data)
59 serializer.is_valid(raise_exception=True)
60 self.perform_create(serializer)
61 return Response(
62 PaymentAdminSerializer(
63 serializer.instance, context=self.get_serializer_context()
64 ).data,
65 status=status.HTTP_201_CREATED,
66 )
67
68
69 class PaymentDetailView(AdminRetrieveAPIView, AdminDestroyAPIView):
70 """View that allows you to manage a single payment as admin."""
71
72 queryset = Payment.objects.all()
73 serializer_class = PaymentAdminSerializer
74 permission_classes = [IsAuthenticatedOrTokenHasScope]
75 required_scopes = ["payments:admin"]
76
77
78 class PayableDetailView(APIView):
79 """View that allows you to manipulate the payment for the payable.
80
81 Permissions of this view are based on the payable.
82 """
83
84 required_scopes = ["payments:admin"]
85 permission_classes = [IsAuthenticatedOrTokenHasScope, IsAdminUser]
86
87 def get_serializer_context(self):
88 return {"request": self.request, "format": self.format_kwarg, "view": self}
89
90 def get_payable(self):
91 app_label = self.kwargs["app_label"]
92 model_name = self.kwargs["model_name"]
93 payable_pk = self.kwargs["payable_pk"]
94
95 try:
96 payable_model = apps.get_model(app_label=app_label, model_name=model_name)
97 payable = payables.get_payable(
98 get_object_or_404(payable_model, pk=payable_pk)
99 )
100 except (LookupError, NotRegistered) as e:
101 raise serializers.ValidationError(
102 {api_settings.NON_FIELD_ERRORS_KEY: [_("Payable model not found")]}
103 ) from e
104
105 if not payable.can_manage_payment(self.request.member):
106 raise PermissionDenied(
107 detail=_("You do not have permission to perform this action.")
108 )
109
110 return payable
111
112 def get(self, request, *args, **kwargs):
113 """Get information about a payable."""
114 serializer = PayableAdminSerializer(
115 self.get_payable(), context=self.get_serializer_context()
116 )
117 return Response(serializer.data, status=status.HTTP_200_OK)
118
119 def delete(self, request, *args, **kwargs):
120 """Remove the current payment for a payable."""
121 payable = self.get_payable()
122
123 if not payable.model.payment:
124 raise Http404
125
126 try:
127 services.delete_payment(
128 payable.model, request.member,
129 )
130 payable.model.save()
131 except PaymentError as e:
132 raise PermissionDenied(detail=str(e))
133
134 return Response(status=status.HTTP_204_NO_CONTENT)
135
136 def patch(self, request, *args, **kwargs):
137 """Mark the payable as paid by creating a payment for it."""
138 serializer = PayableCreateAdminSerializer(
139 data=request.data, context=self.get_serializer_context()
140 )
141 serializer.is_valid(raise_exception=True)
142
143 payable = self.get_payable()
144
145 try:
146 services.create_payment(
147 payable,
148 PaymentUser.objects.get(pk=request.user.pk),
149 serializer.data["payment_type"],
150 )
151 payable.model.save()
152 except PaymentError as e:
153 raise ValidationError(detail={api_settings.NON_FIELD_ERRORS_KEY: [str(e)]})
154
155 return Response(
156 PayableAdminSerializer(payable, context=self.get_serializer_context()).data,
157 status=status.HTTP_201_CREATED,
158 )
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/payments/api/v2/admin/views.py b/website/payments/api/v2/admin/views.py
--- a/website/payments/api/v2/admin/views.py
+++ b/website/payments/api/v2/admin/views.py
@@ -74,6 +74,11 @@
permission_classes = [IsAuthenticatedOrTokenHasScope]
required_scopes = ["payments:admin"]
+ def delete(self, request, *args, **kwargs):
+ if self.get_object().batch and self.get_object().batch.processed:
+ raise PermissionDenied("This payment cannot be deleted.")
+ return super().delete(request, *args, **kwargs)
+
class PayableDetailView(APIView):
"""View that allows you to manipulate the payment for the payable.
| {"golden_diff": "diff --git a/website/payments/api/v2/admin/views.py b/website/payments/api/v2/admin/views.py\n--- a/website/payments/api/v2/admin/views.py\n+++ b/website/payments/api/v2/admin/views.py\n@@ -74,6 +74,11 @@\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"payments:admin\"]\n \n+ def delete(self, request, *args, **kwargs):\n+ if self.get_object().batch and self.get_object().batch.processed:\n+ raise PermissionDenied(\"This payment cannot be deleted.\")\n+ return super().delete(request, *args, **kwargs)\n+\n \n class PayableDetailView(APIView):\n \"\"\"View that allows you to manipulate the payment for the payable.\n", "issue": "PaymentDetailView in admin API allows deleting payments unauthorized\nhttps://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/payments/api/v2/admin/views.py#L69\r\n\r\n### Describe the bug\r\nPayments sometimes should be undeletable. For example, TPay payments that are in a batch. The PaymentAdmin prevents such deletions. However, the rest framework DestroyAPIView does not respect that.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Have a payment \r\n2. Add it to a batch\r\n3. Process the batch\r\n4. Do the API `DELETE` request at `/api/v2/admin/payments/<id>`\r\n\r\n### Expected behaviour\r\nEither disable payment deletion at all from the API, or manually implement a check that the payment is not in a processed batch.\r\n\n", "before_files": [{"content": "import rest_framework.filters as framework_filters\nfrom django.apps import apps\nfrom django.http import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status, serializers\nfrom rest_framework.exceptions import PermissionDenied, ValidationError\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework.views import APIView\n\nfrom payments import services, payables, NotRegistered\nfrom payments.api.v2 import filters\nfrom payments.api.v2.admin.serializers.payable_create import (\n PayableCreateAdminSerializer,\n)\nfrom payments.api.v2.admin.serializers.payable_detail import PayableAdminSerializer\nfrom payments.api.v2.admin.serializers.payment import (\n PaymentAdminSerializer,\n PaymentCreateSerializer,\n)\nfrom payments.exceptions import PaymentError\nfrom payments.models import Payment, PaymentUser\nfrom thaliawebsite.api.v2.admin import (\n AdminListAPIView,\n AdminCreateAPIView,\n AdminRetrieveAPIView,\n AdminDestroyAPIView,\n)\n\n\nclass PaymentListCreateView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"View that allows you to create and list payments as admin.\"\"\"\n\n queryset = Payment.objects.prefetch_related(\n \"paid_by__profile\",\n \"paid_by__membership_set\",\n \"processed_by__profile\",\n \"processed_by__membership_set\",\n )\n\n required_scopes = [\"payments:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.CreatedAtFilter,\n filters.PaymentTypeFilter,\n )\n ordering_fields = (\"created_at\",)\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return PaymentCreateSerializer\n return PaymentAdminSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n 
serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n PaymentAdminSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n\n\nclass PaymentDetailView(AdminRetrieveAPIView, AdminDestroyAPIView):\n \"\"\"View that allows you to manage a single payment as admin.\"\"\"\n\n queryset = Payment.objects.all()\n serializer_class = PaymentAdminSerializer\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"payments:admin\"]\n\n\nclass PayableDetailView(APIView):\n \"\"\"View that allows you to manipulate the payment for the payable.\n\n Permissions of this view are based on the payable.\n \"\"\"\n\n required_scopes = [\"payments:admin\"]\n permission_classes = [IsAuthenticatedOrTokenHasScope, IsAdminUser]\n\n def get_serializer_context(self):\n return {\"request\": self.request, \"format\": self.format_kwarg, \"view\": self}\n\n def get_payable(self):\n app_label = self.kwargs[\"app_label\"]\n model_name = self.kwargs[\"model_name\"]\n payable_pk = self.kwargs[\"payable_pk\"]\n\n try:\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable = payables.get_payable(\n get_object_or_404(payable_model, pk=payable_pk)\n )\n except (LookupError, NotRegistered) as e:\n raise serializers.ValidationError(\n {api_settings.NON_FIELD_ERRORS_KEY: [_(\"Payable model not found\")]}\n ) from e\n\n if not payable.can_manage_payment(self.request.member):\n raise PermissionDenied(\n detail=_(\"You do not have permission to perform this action.\")\n )\n\n return payable\n\n def get(self, request, *args, **kwargs):\n \"\"\"Get information about a payable.\"\"\"\n serializer = PayableAdminSerializer(\n self.get_payable(), context=self.get_serializer_context()\n )\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Remove the current payment for a payable.\"\"\"\n payable = self.get_payable()\n\n if not payable.model.payment:\n raise Http404\n\n try:\n services.delete_payment(\n payable.model, request.member,\n )\n payable.model.save()\n except PaymentError as e:\n raise PermissionDenied(detail=str(e))\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def patch(self, request, *args, **kwargs):\n \"\"\"Mark the payable as paid by creating a payment for it.\"\"\"\n serializer = PayableCreateAdminSerializer(\n data=request.data, context=self.get_serializer_context()\n )\n serializer.is_valid(raise_exception=True)\n\n payable = self.get_payable()\n\n try:\n services.create_payment(\n payable,\n PaymentUser.objects.get(pk=request.user.pk),\n serializer.data[\"payment_type\"],\n )\n payable.model.save()\n except PaymentError as e:\n raise ValidationError(detail={api_settings.NON_FIELD_ERRORS_KEY: [str(e)]})\n\n return Response(\n PayableAdminSerializer(payable, context=self.get_serializer_context()).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/payments/api/v2/admin/views.py"}], "after_files": [{"content": "import rest_framework.filters as framework_filters\nfrom django.apps import apps\nfrom django.http import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status, serializers\nfrom rest_framework.exceptions import PermissionDenied, ValidationError\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import 
IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework.views import APIView\n\nfrom payments import services, payables, NotRegistered\nfrom payments.api.v2 import filters\nfrom payments.api.v2.admin.serializers.payable_create import (\n PayableCreateAdminSerializer,\n)\nfrom payments.api.v2.admin.serializers.payable_detail import PayableAdminSerializer\nfrom payments.api.v2.admin.serializers.payment import (\n PaymentAdminSerializer,\n PaymentCreateSerializer,\n)\nfrom payments.exceptions import PaymentError\nfrom payments.models import Payment, PaymentUser\nfrom thaliawebsite.api.v2.admin import (\n AdminListAPIView,\n AdminCreateAPIView,\n AdminRetrieveAPIView,\n AdminDestroyAPIView,\n)\n\n\nclass PaymentListCreateView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"View that allows you to create and list payments as admin.\"\"\"\n\n queryset = Payment.objects.prefetch_related(\n \"paid_by__profile\",\n \"paid_by__membership_set\",\n \"processed_by__profile\",\n \"processed_by__membership_set\",\n )\n\n required_scopes = [\"payments:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.CreatedAtFilter,\n filters.PaymentTypeFilter,\n )\n ordering_fields = (\"created_at\",)\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return PaymentCreateSerializer\n return PaymentAdminSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n PaymentAdminSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n\n\nclass PaymentDetailView(AdminRetrieveAPIView, AdminDestroyAPIView):\n \"\"\"View that allows you to manage a single payment as admin.\"\"\"\n\n queryset = Payment.objects.all()\n serializer_class = PaymentAdminSerializer\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"payments:admin\"]\n\n def delete(self, request, *args, **kwargs):\n if self.get_object().batch and self.get_object().batch.processed:\n raise PermissionDenied(\"This payment cannot be deleted.\")\n return super().delete(request, *args, **kwargs)\n\n\nclass PayableDetailView(APIView):\n \"\"\"View that allows you to manipulate the payment for the payable.\n\n Permissions of this view are based on the payable.\n \"\"\"\n\n required_scopes = [\"payments:admin\"]\n permission_classes = [IsAuthenticatedOrTokenHasScope, IsAdminUser]\n\n def get_serializer_context(self):\n return {\"request\": self.request, \"format\": self.format_kwarg, \"view\": self}\n\n def get_payable(self):\n app_label = self.kwargs[\"app_label\"]\n model_name = self.kwargs[\"model_name\"]\n payable_pk = self.kwargs[\"payable_pk\"]\n\n try:\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable = payables.get_payable(\n get_object_or_404(payable_model, pk=payable_pk)\n )\n except (LookupError, NotRegistered) as e:\n raise serializers.ValidationError(\n {api_settings.NON_FIELD_ERRORS_KEY: [_(\"Payable model not found\")]}\n ) from e\n\n if not payable.can_manage_payment(self.request.member):\n raise PermissionDenied(\n detail=_(\"You do not have permission to perform this action.\")\n )\n\n return payable\n\n def get(self, request, *args, **kwargs):\n \"\"\"Get information about a payable.\"\"\"\n serializer = PayableAdminSerializer(\n self.get_payable(), 
context=self.get_serializer_context()\n )\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Remove the current payment for a payable.\"\"\"\n payable = self.get_payable()\n\n if not payable.model.payment:\n raise Http404\n\n try:\n services.delete_payment(\n payable.model, request.member,\n )\n payable.model.save()\n except PaymentError as e:\n raise PermissionDenied(detail=str(e))\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def patch(self, request, *args, **kwargs):\n \"\"\"Mark the payable as paid by creating a payment for it.\"\"\"\n serializer = PayableCreateAdminSerializer(\n data=request.data, context=self.get_serializer_context()\n )\n serializer.is_valid(raise_exception=True)\n\n payable = self.get_payable()\n\n try:\n services.create_payment(\n payable,\n PaymentUser.objects.get(pk=request.user.pk),\n serializer.data[\"payment_type\"],\n )\n payable.model.save()\n except PaymentError as e:\n raise ValidationError(detail={api_settings.NON_FIELD_ERRORS_KEY: [str(e)]})\n\n return Response(\n PayableAdminSerializer(payable, context=self.get_serializer_context()).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/payments/api/v2/admin/views.py"}]} | 1,942 | 168 |
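
Why this golden diff works, in brief: DRF's `AdminDestroyAPIView` ultimately inherits `DestroyAPIView`, whose `delete` method destroys the object unconditionally, so the fix must veto the request before delegating to `super()`. A minimal sketch of just that guard, using plain-Python stand-ins for the Django models (the stub classes here are assumptions for illustration, not concrexit code):

```python
class Batch:
    def __init__(self, processed: bool):
        self.processed = processed


class Payment:
    def __init__(self, batch: "Batch | None" = None):
        self.batch = batch


class PermissionDenied(Exception):
    """Stand-in for rest_framework.exceptions.PermissionDenied."""


def guard_delete(payment: Payment) -> str:
    # Mirrors the override in the golden diff: a payment that sits in an
    # already-processed batch must not be deletable via the API.
    if payment.batch and payment.batch.processed:
        raise PermissionDenied("This payment cannot be deleted.")
    return "deleted"


assert guard_delete(Payment()) == "deleted"
assert guard_delete(Payment(Batch(processed=False))) == "deleted"
try:
    guard_delete(Payment(Batch(processed=True)))
except PermissionDenied:
    pass  # expected: the processed batch blocks deletion
```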
gh_patches_debug_644 | rasdani/github-patches | git_diff | pex-tool__pex-1864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.101
On the docket:
+ [x] Pex fails to find RECORD for python-certifi-win32 1.6.1 #1861
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.100"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.100"
+__version__ = "2.1.101"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.100\"\n+__version__ = \"2.1.101\"\n", "issue": "Release 2.1.101\nOn the docket:\r\n+ [x] Pex fails to find RECORD for python-certifi-win32 1.6.1 #1861\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.100\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.101\"\n", "path": "pex/version.py"}]} | 352 | 98 |
gh_patches_debug_32606 | rasdani/github-patches | git_diff | nextcloud__appstore-33 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API routes should be enabled for CORS
Only API routes should be whitelisted for CORS
We need to make sure that CORS and session auth are mutually exclusive
The solution is probably to integrate https://github.com/ottoyiu/django-cors-headers/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/settings.py`
Content:
```
1 """
2 Django settings for nextcloudappstore project.
3
4 Generated by 'django-admin startproject' using Django 1.9.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.9/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.9/ref/settings/
11 """
12
13 import os
14
15 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17
18 # Quick-start development settings - unsuitable for production
19 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
20
21 # Application definition
22
23 INSTALLED_APPS = [
24 'parler',
25 'rest_framework',
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 # The Django sites framework is required by allauth
32 'django.contrib.sites',
33 'django.contrib.staticfiles',
34 'captcha',
35 'nextcloudappstore.core.apps.CoreConfig',
36 'allauth',
37 'allauth.account',
38 'allauth.socialaccount',
39 'allauth.socialaccount.providers.github',
40 'allauth.socialaccount.providers.bitbucket',
41 ]
42
43 MIDDLEWARE_CLASSES = [
44 'django.middleware.security.SecurityMiddleware',
45 'django.contrib.sessions.middleware.SessionMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
50 'django.contrib.messages.middleware.MessageMiddleware',
51 'django.middleware.clickjacking.XFrameOptionsMiddleware',
52 ]
53
54 ROOT_URLCONF = 'nextcloudappstore.urls'
55
56 TEMPLATES = [
57 {
58 'BACKEND': 'django.template.backends.django.DjangoTemplates',
59 'DIRS': [],
60 'APP_DIRS': True,
61 'OPTIONS': {
62 'context_processors': [
63 'django.template.context_processors.debug',
64 'django.template.context_processors.request',
65 'django.contrib.auth.context_processors.auth',
66 'django.contrib.messages.context_processors.messages',
67 ],
68 },
69 },
70 ]
71
72 WSGI_APPLICATION = 'nextcloudappstore.wsgi.application'
73
74 # Database
75 # https://docs.djangoproject.com/en/1.9/ref/settings/#databases
76
77 DATABASES = {
78 'default': {
79 'ENGINE': 'django.db.backends.sqlite3',
80 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
81 'TEST': {
82 'NAME': os.path.join(BASE_DIR, 'test.sqlite3'),
83 }
84 }
85 }
86
87 AUTHENTICATION_BACKENDS = (
88 # Needed to login by username in Django admin, regardless of `allauth`
89 'django.contrib.auth.backends.ModelBackend',
90
91 # `allauth` specific authentication methods, such as login by e-mail
92 'allauth.account.auth_backends.AuthenticationBackend',
93 )
94
95 # Password validation
96 # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
97
98 AUTH_PASSWORD_VALIDATORS = [
99 {
100 'NAME': 'django.contrib.auth.password_validation'
101 '.UserAttributeSimilarityValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation'
105 '.MinimumLengthValidator',
106 },
107 {
108 'NAME': 'django.contrib.auth.password_validation'
109 '.CommonPasswordValidator',
110 },
111 {
112 'NAME': 'django.contrib.auth.password_validation'
113 '.NumericPasswordValidator',
114 },
115 ]
116
117 REST_FRAMEWORK = {
118 'DEFAULT_RENDERER_CLASSES': (
119 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
120 ),
121 'DEFAULT_PARSER_CLASSES': (
122 'djangorestframework_camel_case.parser.CamelCaseJSONParser',
123 ),
124 'DEFAULT_THROTTLE_RATES': {
125 'app_upload': '100/day'
126 }
127 }
128
129 SITE_ID = 1
130
131 # Allauth configuration
132 # http://django-allauth.readthedocs.io/en/latest/configuration.html
133 ACCOUNT_EMAIL_REQUIRED = True
134 ACCOUNT_EMAIL_VERIFICATION = "mandatory"
135 ACCOUNT_LOGOUT_ON_GET = True
136 ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
137 ACCOUNT_SESSION_REMEMBER = True
138 ACCOUNT_SIGNUP_FORM_CLASS = \
139 'nextcloudappstore.core.user.forms.SignupFormRecaptcha'
140
141 # Internationalization
142 # https://docs.djangoproject.com/en/1.9/topics/i18n/
143 LANGUAGE_CODE = 'en-us'
144 TIME_ZONE = 'UTC'
145 USE_I18N = True
146 USE_L10N = True
147 USE_TZ = True
148
149 PARLER_LANGUAGES = {
150 1: (
151 {'code': 'en'},
152 {'code': 'de'},
153 {'code': 'fr'},
154 ),
155 'default': {
156 'fallbacks': ['en'],
157 'hide_untranslated': False,
158 }
159 }
160
161 # Static files (CSS, JavaScript, Images)
162 # https://docs.djangoproject.com/en/1.9/howto/static-files/
163 STATICFILES_DIRS = (
164 os.path.join(BASE_DIR, 'static'),
165 )
166 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
167 RELEASE_DOWNLOAD_ROOT = os.path.join(MEDIA_ROOT, 'releasetmp')
168 STATIC_URL = '/static/'
169 MEDIA_URL = '/media/'
170
171 # Default security settings
172 SECURE_BROWSER_XSS_FILTER = True
173 SECURE_CONTENT_TYPE_NOSNIFF = True
174
175 # use modern no Captcha reCaptcha
176 NOCAPTCHA = True
177
178
179 LOGIN_REDIRECT_URL = 'home'
180
181 try:
182 from nextcloudappstore.local_settings import *
183 except:
184 pass
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nextcloudappstore/settings.py b/nextcloudappstore/settings.py
--- a/nextcloudappstore/settings.py
+++ b/nextcloudappstore/settings.py
@@ -21,28 +21,29 @@
# Application definition
INSTALLED_APPS = [
+ 'nextcloudappstore.core.apps.CoreConfig',
'parler',
+ 'captcha',
'rest_framework',
+ 'corsheaders',
+ 'allauth',
+ 'allauth.account',
+ 'allauth.socialaccount',
+ 'allauth.socialaccount.providers.github',
+ 'allauth.socialaccount.providers.bitbucket',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
- # The Django sites framework is required by allauth
'django.contrib.sites',
'django.contrib.staticfiles',
- 'captcha',
- 'nextcloudappstore.core.apps.CoreConfig',
- 'allauth',
- 'allauth.account',
- 'allauth.socialaccount',
- 'allauth.socialaccount.providers.github',
- 'allauth.socialaccount.providers.bitbucket',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
+ 'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
@@ -171,6 +172,22 @@
# Default security settings
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
+CORS_ORIGIN_ALLOW_ALL = True
+CORS_URLS_REGEX = r'^/api/.*$'
+CORS_ALLOW_HEADERS = (
+ 'x-requested-with',
+ 'content-type',
+ 'accept',
+ 'origin',
+ 'authorization',
+ 'x-csrftoken',
+ 'if-none-match',
+)
+CORS_EXPOSE_HEADERS = (
+ 'etag',
+ 'x-content-type-options',
+ 'content-type',
+)
# use modern no Captcha reCaptcha
NOCAPTCHA = True
| {"golden_diff": "diff --git a/nextcloudappstore/settings.py b/nextcloudappstore/settings.py\n--- a/nextcloudappstore/settings.py\n+++ b/nextcloudappstore/settings.py\n@@ -21,28 +21,29 @@\n # Application definition\n \n INSTALLED_APPS = [\n+ 'nextcloudappstore.core.apps.CoreConfig',\n 'parler',\n+ 'captcha',\n 'rest_framework',\n+ 'corsheaders',\n+ 'allauth',\n+ 'allauth.account',\n+ 'allauth.socialaccount',\n+ 'allauth.socialaccount.providers.github',\n+ 'allauth.socialaccount.providers.bitbucket',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n- # The Django sites framework is required by allauth\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n- 'captcha',\n- 'nextcloudappstore.core.apps.CoreConfig',\n- 'allauth',\n- 'allauth.account',\n- 'allauth.socialaccount',\n- 'allauth.socialaccount.providers.github',\n- 'allauth.socialaccount.providers.bitbucket',\n ]\n \n MIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n+ 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n@@ -171,6 +172,22 @@\n # Default security settings\n SECURE_BROWSER_XSS_FILTER = True\n SECURE_CONTENT_TYPE_NOSNIFF = True\n+CORS_ORIGIN_ALLOW_ALL = True\n+CORS_URLS_REGEX = r'^/api/.*$'\n+CORS_ALLOW_HEADERS = (\n+ 'x-requested-with',\n+ 'content-type',\n+ 'accept',\n+ 'origin',\n+ 'authorization',\n+ 'x-csrftoken',\n+ 'if-none-match',\n+)\n+CORS_EXPOSE_HEADERS = (\n+ 'etag',\n+ 'x-content-type-options',\n+ 'content-type',\n+)\n \n # use modern no Captcha reCaptcha\n NOCAPTCHA = True\n", "issue": "API routes should be enabled for CORS\nOnly API routes should be whitelisted for CORS\n\nWe need to make sure that CORS and session auth are mutually exclusive\n\nThe solution is probably to integrate https://github.com/ottoyiu/django-cors-headers/\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for nextcloudappstore project.\n\nGenerated by 'django-admin startproject' using Django 1.9.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = [\n 'parler',\n 'rest_framework',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n # The Django sites framework is required by allauth\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'captcha',\n 'nextcloudappstore.core.apps.CoreConfig',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.bitbucket',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'nextcloudappstore.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'nextcloudappstore.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test.sqlite3'),\n }\n }\n}\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'app_upload': '100/day'\n }\n}\n\nSITE_ID = 1\n\n# Allauth configuration\n# http://django-allauth.readthedocs.io/en/latest/configuration.html\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_LOGOUT_ON_GET = True\nACCOUNT_LOGOUT_REDIRECT_URL = 'home'\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_SIGNUP_FORM_CLASS = \\\n 'nextcloudappstore.core.user.forms.SignupFormRecaptcha'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nPARLER_LANGUAGES = {\n 1: (\n {'code': 'en'},\n {'code': 'de'},\n {'code': 'fr'},\n ),\n 'default': {\n 'fallbacks': ['en'],\n 'hide_untranslated': False,\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nRELEASE_DOWNLOAD_ROOT = os.path.join(MEDIA_ROOT, 'releasetmp')\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Default security settings\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\n# use modern no Captcha reCaptcha\nNOCAPTCHA = True\n\n\nLOGIN_REDIRECT_URL = 'home'\n\ntry:\n from nextcloudappstore.local_settings import *\nexcept:\n pass\n", "path": "nextcloudappstore/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for nextcloudappstore project.\n\nGenerated by 'django-admin 
startproject' using Django 1.9.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = [\n 'nextcloudappstore.core.apps.CoreConfig',\n 'parler',\n 'captcha',\n 'rest_framework',\n 'corsheaders',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.bitbucket',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'nextcloudappstore.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'nextcloudappstore.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test.sqlite3'),\n }\n }\n}\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation'\n '.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'app_upload': '100/day'\n }\n}\n\nSITE_ID = 1\n\n# Allauth configuration\n# http://django-allauth.readthedocs.io/en/latest/configuration.html\nACCOUNT_EMAIL_REQUIRED = 
True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_LOGOUT_ON_GET = True\nACCOUNT_LOGOUT_REDIRECT_URL = 'home'\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_SIGNUP_FORM_CLASS = \\\n 'nextcloudappstore.core.user.forms.SignupFormRecaptcha'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nPARLER_LANGUAGES = {\n 1: (\n {'code': 'en'},\n {'code': 'de'},\n {'code': 'fr'},\n ),\n 'default': {\n 'fallbacks': ['en'],\n 'hide_untranslated': False,\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nRELEASE_DOWNLOAD_ROOT = os.path.join(MEDIA_ROOT, 'releasetmp')\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Default security settings\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'if-none-match',\n)\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'x-content-type-options',\n 'content-type',\n)\n\n# use modern no Captcha reCaptcha\nNOCAPTCHA = True\n\n\nLOGIN_REDIRECT_URL = 'home'\n\ntry:\n from nextcloudappstore.local_settings import *\nexcept:\n pass\n", "path": "nextcloudappstore/settings.py"}]} | 1,941 | 478 |
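
The decisive settings in this golden diff are `CORS_URLS_REGEX` and the middleware ordering: `django-cors-headers` matches the regex against the request path, so only `/api/...` responses get CORS headers while session-authenticated pages are left alone. A small sketch of that path gate, assuming the same regex value as the patched settings (this reimplements only the matching logic for illustration, not the middleware itself):

```python
import re

CORS_URLS_REGEX = r'^/api/.*$'  # value taken from the patched settings.py

def cors_applies(path: str) -> bool:
    # django-cors-headers only processes requests whose path matches
    # CORS_URLS_REGEX; everything else passes through untouched.
    return re.match(CORS_URLS_REGEX, path) is not None

assert cors_applies('/api/v1/platforms.json')
assert not cors_applies('/admin/')
assert not cors_applies('/accounts/login/')
```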
gh_patches_debug_38106 | rasdani/github-patches | git_diff | TOMToolkit__tom_base-825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ATLAS forced photometry data processor should correctly interpret limiting data points.
see [ATLAS Forced Photemetry Output Description](https://fallingstar-data.com/forcedphot/resultdesc/)
for how to interpret data.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tom_dataproducts/processors/atlas_processor.py`
Content:
```
1 import mimetypes
2
3 from astropy import units
4 import astropy.io.ascii
5 from astropy.time import Time, TimezoneInfo
6
7 from tom_dataproducts.data_processor import DataProcessor
8 from tom_dataproducts.exceptions import InvalidFileFormatException
9
10
11 class AtlasProcessor(DataProcessor):
12
13 def data_type_override(self):
14 return 'photometry'
15
16 def process_data(self, data_product):
17 """
18 Routes a atlas processing call to a method specific to a file-format.
19
20 :param data_product: Photometric DataProduct which will be processed into the specified format for database
21 ingestion
22 :type data_product: DataProduct
23
24 :returns: python list of 2-tuples, each with a timestamp and corresponding data
25 :rtype: list
26 """
27
28 mimetype = mimetypes.guess_type(data_product.data.path)[0]
29 if mimetype in self.PLAINTEXT_MIMETYPES:
30 photometry = self._process_photometry_from_plaintext(data_product)
31 return [(datum.pop('timestamp'), datum, datum.pop('source', 'ATLAS')) for datum in photometry]
32 else:
33 raise InvalidFileFormatException('Unsupported file type')
34
35 def _process_photometry_from_plaintext(self, data_product):
36 """
37 Processes the photometric data from a plaintext file into a list of dicts. File is read using astropy as
38 specified in the below documentation. The file is expected to be a multi-column delimited space delimited
39 text file, as produced by the ATLAS forced photometry service at https://fallingstar-data.com/forcedphot
40
41 The header looks like this:
42 ###MJD m dm uJy duJy F err chi/N RA Dec x y maj min phi apfit mag5sig Sky Obs
43
44 :param data_product: ATLAS Photometric DataProduct which will be processed into a list of dicts
45 :type data_product: DataProduct
46
47 :returns: python list containing the photometric data from the DataProduct
48 :rtype: list
49 """
50 photometry = []
51
52 data = astropy.io.ascii.read(data_product.data.path)
53 if len(data) < 1:
54 raise InvalidFileFormatException('Empty table or invalid file type')
55
56 try:
57 for datum in data:
58 time = Time(float(datum['##MJD']), format='mjd')
59 utc = TimezoneInfo(utc_offset=0*units.hour)
60 time.format = 'datetime'
61 value = {
62 'timestamp': time.to_datetime(timezone=utc),
63 'magnitude': float(datum['m']),
64 'magnitude_error': float(datum['dm']),
65 'filter': str(datum['F'])
66 }
67 photometry.append(value)
68 except Exception as e:
69 raise InvalidFileFormatException(e)
70
71 return photometry
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tom_dataproducts/processors/atlas_processor.py b/tom_dataproducts/processors/atlas_processor.py
--- a/tom_dataproducts/processors/atlas_processor.py
+++ b/tom_dataproducts/processors/atlas_processor.py
@@ -21,7 +21,7 @@
ingestion
:type data_product: DataProduct
- :returns: python list of 2-tuples, each with a timestamp and corresponding data
+ :returns: python list of 3-tuples, each with a timestamp and corresponding data, and source
:rtype: list
"""
@@ -37,6 +37,7 @@
Processes the photometric data from a plaintext file into a list of dicts. File is read using astropy as
specified in the below documentation. The file is expected to be a multi-column delimited space delimited
text file, as produced by the ATLAS forced photometry service at https://fallingstar-data.com/forcedphot
+ See https://fallingstar-data.com/forcedphot/resultdesc/ for a description of the output format.
The header looks like this:
###MJD m dm uJy duJy F err chi/N RA Dec x y maj min phi apfit mag5sig Sky Obs
@@ -48,6 +49,7 @@
:rtype: list
"""
photometry = []
+ signal_to_noise_cutoff = 3.0 # cutoff to turn magnitudes into non-detection limits
data = astropy.io.ascii.read(data_product.data.path)
if len(data) < 1:
@@ -60,10 +62,19 @@
time.format = 'datetime'
value = {
'timestamp': time.to_datetime(timezone=utc),
- 'magnitude': float(datum['m']),
- 'magnitude_error': float(datum['dm']),
- 'filter': str(datum['F'])
+ 'filter': str(datum['F']),
+ 'error': float(datum['dm']),
+ 'telescope': 'ATLAS',
}
+ # If the signal is in the noise, set the non-detection limit to the
+ # absolute value of the reported magnitude.
+ # see https://fallingstar-data.com/forcedphot/resultdesc/
+ signal_to_noise = abs(float(datum['uJy']))/abs(float(datum['duJy']))
+ if signal_to_noise <= signal_to_noise_cutoff:
+ value['limit'] = abs(float(datum['m']))
+ else:
+ value['magnitude'] = abs(float(datum['m']))
+
photometry.append(value)
except Exception as e:
raise InvalidFileFormatException(e)
| {"golden_diff": "diff --git a/tom_dataproducts/processors/atlas_processor.py b/tom_dataproducts/processors/atlas_processor.py\n--- a/tom_dataproducts/processors/atlas_processor.py\n+++ b/tom_dataproducts/processors/atlas_processor.py\n@@ -21,7 +21,7 @@\n ingestion\n :type data_product: DataProduct\n \n- :returns: python list of 2-tuples, each with a timestamp and corresponding data\n+ :returns: python list of 3-tuples, each with a timestamp and corresponding data, and source\n :rtype: list\n \"\"\"\n \n@@ -37,6 +37,7 @@\n Processes the photometric data from a plaintext file into a list of dicts. File is read using astropy as\n specified in the below documentation. The file is expected to be a multi-column delimited space delimited\n text file, as produced by the ATLAS forced photometry service at https://fallingstar-data.com/forcedphot\n+ See https://fallingstar-data.com/forcedphot/resultdesc/ for a description of the output format.\n \n The header looks like this:\n ###MJD m dm uJy duJy F err chi/N RA Dec x y maj min phi apfit mag5sig Sky Obs\n@@ -48,6 +49,7 @@\n :rtype: list\n \"\"\"\n photometry = []\n+ signal_to_noise_cutoff = 3.0 # cutoff to turn magnitudes into non-detection limits\n \n data = astropy.io.ascii.read(data_product.data.path)\n if len(data) < 1:\n@@ -60,10 +62,19 @@\n time.format = 'datetime'\n value = {\n 'timestamp': time.to_datetime(timezone=utc),\n- 'magnitude': float(datum['m']),\n- 'magnitude_error': float(datum['dm']),\n- 'filter': str(datum['F'])\n+ 'filter': str(datum['F']),\n+ 'error': float(datum['dm']),\n+ 'telescope': 'ATLAS',\n }\n+ # If the signal is in the noise, set the non-detection limit to the\n+ # absolute value of the reported magnitude.\n+ # see https://fallingstar-data.com/forcedphot/resultdesc/\n+ signal_to_noise = abs(float(datum['uJy']))/abs(float(datum['duJy']))\n+ if signal_to_noise <= signal_to_noise_cutoff:\n+ value['limit'] = abs(float(datum['m']))\n+ else:\n+ value['magnitude'] = abs(float(datum['m']))\n+\n photometry.append(value)\n except Exception as e:\n raise InvalidFileFormatException(e)\n", "issue": "ATLAS forced photometry data processor should correctly interpret limiting data points.\nsee [ATLAS Forced Photemetry Output Description](https://fallingstar-data.com/forcedphot/resultdesc/)\nfor how to interpret data.\n", "before_files": [{"content": "import mimetypes\n\nfrom astropy import units\nimport astropy.io.ascii\nfrom astropy.time import Time, TimezoneInfo\n\nfrom tom_dataproducts.data_processor import DataProcessor\nfrom tom_dataproducts.exceptions import InvalidFileFormatException\n\n\nclass AtlasProcessor(DataProcessor):\n\n def data_type_override(self):\n return 'photometry'\n\n def process_data(self, data_product):\n \"\"\"\n Routes a atlas processing call to a method specific to a file-format.\n\n :param data_product: Photometric DataProduct which will be processed into the specified format for database\n ingestion\n :type data_product: DataProduct\n\n :returns: python list of 2-tuples, each with a timestamp and corresponding data\n :rtype: list\n \"\"\"\n\n mimetype = mimetypes.guess_type(data_product.data.path)[0]\n if mimetype in self.PLAINTEXT_MIMETYPES:\n photometry = self._process_photometry_from_plaintext(data_product)\n return [(datum.pop('timestamp'), datum, datum.pop('source', 'ATLAS')) for datum in photometry]\n else:\n raise InvalidFileFormatException('Unsupported file type')\n\n def _process_photometry_from_plaintext(self, data_product):\n \"\"\"\n Processes the photometric data from a plaintext file into 
a list of dicts. File is read using astropy as\n specified in the below documentation. The file is expected to be a multi-column delimited space delimited\n text file, as produced by the ATLAS forced photometry service at https://fallingstar-data.com/forcedphot\n\n The header looks like this:\n ###MJD m dm uJy duJy F err chi/N RA Dec x y maj min phi apfit mag5sig Sky Obs\n\n :param data_product: ATLAS Photometric DataProduct which will be processed into a list of dicts\n :type data_product: DataProduct\n\n :returns: python list containing the photometric data from the DataProduct\n :rtype: list\n \"\"\"\n photometry = []\n\n data = astropy.io.ascii.read(data_product.data.path)\n if len(data) < 1:\n raise InvalidFileFormatException('Empty table or invalid file type')\n\n try:\n for datum in data:\n time = Time(float(datum['##MJD']), format='mjd')\n utc = TimezoneInfo(utc_offset=0*units.hour)\n time.format = 'datetime'\n value = {\n 'timestamp': time.to_datetime(timezone=utc),\n 'magnitude': float(datum['m']),\n 'magnitude_error': float(datum['dm']),\n 'filter': str(datum['F'])\n }\n photometry.append(value)\n except Exception as e:\n raise InvalidFileFormatException(e)\n\n return photometry\n", "path": "tom_dataproducts/processors/atlas_processor.py"}], "after_files": [{"content": "import mimetypes\n\nfrom astropy import units\nimport astropy.io.ascii\nfrom astropy.time import Time, TimezoneInfo\n\nfrom tom_dataproducts.data_processor import DataProcessor\nfrom tom_dataproducts.exceptions import InvalidFileFormatException\n\n\nclass AtlasProcessor(DataProcessor):\n\n def data_type_override(self):\n return 'photometry'\n\n def process_data(self, data_product):\n \"\"\"\n Routes a atlas processing call to a method specific to a file-format.\n\n :param data_product: Photometric DataProduct which will be processed into the specified format for database\n ingestion\n :type data_product: DataProduct\n\n :returns: python list of 3-tuples, each with a timestamp and corresponding data, and source\n :rtype: list\n \"\"\"\n\n mimetype = mimetypes.guess_type(data_product.data.path)[0]\n if mimetype in self.PLAINTEXT_MIMETYPES:\n photometry = self._process_photometry_from_plaintext(data_product)\n return [(datum.pop('timestamp'), datum, datum.pop('source', 'ATLAS')) for datum in photometry]\n else:\n raise InvalidFileFormatException('Unsupported file type')\n\n def _process_photometry_from_plaintext(self, data_product):\n \"\"\"\n Processes the photometric data from a plaintext file into a list of dicts. File is read using astropy as\n specified in the below documentation. 
The file is expected to be a multi-column delimited space delimited\n text file, as produced by the ATLAS forced photometry service at https://fallingstar-data.com/forcedphot\n See https://fallingstar-data.com/forcedphot/resultdesc/ for a description of the output format.\n\n The header looks like this:\n ###MJD m dm uJy duJy F err chi/N RA Dec x y maj min phi apfit mag5sig Sky Obs\n\n :param data_product: ATLAS Photometric DataProduct which will be processed into a list of dicts\n :type data_product: DataProduct\n\n :returns: python list containing the photometric data from the DataProduct\n :rtype: list\n \"\"\"\n photometry = []\n signal_to_noise_cutoff = 3.0 # cutoff to turn magnitudes into non-detection limits\n\n data = astropy.io.ascii.read(data_product.data.path)\n if len(data) < 1:\n raise InvalidFileFormatException('Empty table or invalid file type')\n\n try:\n for datum in data:\n time = Time(float(datum['##MJD']), format='mjd')\n utc = TimezoneInfo(utc_offset=0*units.hour)\n time.format = 'datetime'\n value = {\n 'timestamp': time.to_datetime(timezone=utc),\n 'filter': str(datum['F']),\n 'error': float(datum['dm']),\n 'telescope': 'ATLAS',\n }\n # If the signal is in the noise, set the non-detection limit to the\n # absolute value of the reported magnitude.\n # see https://fallingstar-data.com/forcedphot/resultdesc/\n signal_to_noise = abs(float(datum['uJy']))/abs(float(datum['duJy']))\n if signal_to_noise <= signal_to_noise_cutoff:\n value['limit'] = abs(float(datum['m']))\n else:\n value['magnitude'] = abs(float(datum['m']))\n\n photometry.append(value)\n except Exception as e:\n raise InvalidFileFormatException(e)\n\n return photometry\n", "path": "tom_dataproducts/processors/atlas_processor.py"}]} | 1,054 | 608 |
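
The substance of this fix is the flux-based detection test: ATLAS rows carry `uJy` (flux) and `duJy` (flux uncertainty), and per the linked output description a row whose signal-to-noise `|uJy|/|duJy|` falls at or below the cutoff is a non-detection whose `m` column is a limiting magnitude, not a measurement. A standalone sketch of that classification, using the same 3.0 cutoff the patch introduces (the sample values below are made up for illustration):

```python
SIGNAL_TO_NOISE_CUTOFF = 3.0  # same cutoff the patched processor uses

def classify_atlas_point(m: float, uJy: float, duJy: float):
    """Return ('limit', value) for non-detections, ('magnitude', value) otherwise."""
    signal_to_noise = abs(float(uJy)) / abs(float(duJy))
    key = 'limit' if signal_to_noise <= SIGNAL_TO_NOISE_CUTOFF else 'magnitude'
    return key, abs(float(m))

# A noisy, low-flux row becomes a limiting magnitude...
assert classify_atlas_point(m=-19.2, uJy=12.0, duJy=25.0) == ('limit', 19.2)
# ...while a clear detection keeps its magnitude.
assert classify_atlas_point(m=17.5, uJy=500.0, duJy=20.0) == ('magnitude', 17.5)
```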
gh_patches_debug_2210 | rasdani/github-patches | git_diff | ARM-DOE__ACT-673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feedstock failing due to pandas datetime
### Description
CI is failing due to datetime units not being set for csv reader
### What I Did
See the PR here that was failing
https://github.com/conda-forge/act-atmos-feedstock/pull/63
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/io/csvfiles.py`
Content:
```
1 """
2 This module contains I/O operations for loading csv files.
3
4 """
5
6 import pathlib
7
8 import pandas as pd
9
10 from .armfiles import check_arm_standards
11
12
13 def read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):
14
15 """
16 Returns an `xarray.Dataset` with stored data and metadata from user-defined
17 query of CSV files.
18
19 Parameters
20 ----------
21 filenames : str or list
22 Name of file(s) to read.
23 sep : str
24 The separator between columns in the csv file.
25 column_names : list or None
26 The list of column names in the csv file.
27 verbose : bool
28 If true, will print if a file is not found.
29 ignore_index : bool
30 Keyword for pandas concat function. If True, do not use the index
31 values along the concatenation axis. The resulting axis will be labeled
32 0, …, n - 1. This is useful if you are concatenating datasets where the
33 concatenation axis does not have meaningful indexing information. Note
34 the index values on the other axes are still respected in the join.
35
36 Additional keyword arguments will be passed into pandas.read_csv.
37
38 Returns
39 -------
40 ds : xarray.Dataset
41 ACT Xarray dataset. Will be None if the file is not found.
42
43 Examples
44 --------
45 This example will load the example sounding data used for unit testing:
46
47 .. code-block:: python
48
49 import act
50
51 ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)
52
53 """
54
55 # Convert to string if filename is a pathlib or not a list
56 if isinstance(filename, (pathlib.PurePath, str)):
57 filename = [str(filename)]
58
59 if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):
60 filename = [str(ii) for ii in filename]
61
62 # Read data using pandas read_csv one file at a time and append to
63 # list. Then concatinate the list into one pandas dataframe.
64 li = []
65 for fl in filename:
66 df = pd.read_csv(
67 fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs
68 )
69 li.append(df)
70
71 if len(li) == 1:
72 df = li[0]
73 else:
74 df = pd.concat(li, axis=0, ignore_index=ignore_index)
75
76 # Set Coordinates if there's a variable date_time
77 if 'date_time' in df:
78 df.date_time = df.date_time.astype('datetime64')
79 df.time = df.date_time
80 df = df.set_index('time')
81
82 # Convert to xarray DataSet
83 ds = df.to_xarray()
84
85 # Set additional variables
86 # Since we cannot assume a standard naming convention setting
87 # file_date and file_time to the first time in the file
88 x_coord = ds.coords.to_index().values[0]
89 if isinstance(x_coord, str):
90 x_coord_dt = pd.to_datetime(x_coord)
91 ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')
92 ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')
93
94 # Check for standard ARM datastream name, if none, assume the file is ARM
95 # standard format.
96 is_arm_file_flag = check_arm_standards(ds)
97 if is_arm_file_flag == 0:
98
99 ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])
100
101 # Add additional attributes, site, standards flag, etc...
102 ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]
103 ds.attrs['_arm_standards_flag'] = is_arm_file_flag
104
105 return ds
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/io/csvfiles.py b/act/io/csvfiles.py
--- a/act/io/csvfiles.py
+++ b/act/io/csvfiles.py
@@ -75,7 +75,7 @@
# Set Coordinates if there's a variable date_time
if 'date_time' in df:
- df.date_time = df.date_time.astype('datetime64')
+ df.date_time = df.date_time.astype('datetime64[ns]')
df.time = df.date_time
df = df.set_index('time')
| {"golden_diff": "diff --git a/act/io/csvfiles.py b/act/io/csvfiles.py\n--- a/act/io/csvfiles.py\n+++ b/act/io/csvfiles.py\n@@ -75,7 +75,7 @@\n \n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n- df.date_time = df.date_time.astype('datetime64')\n+ df.date_time = df.date_time.astype('datetime64[ns]')\n df.time = df.date_time\n df = df.set_index('time')\n", "issue": "Feedstock failing due to pandas datetime\n### Description\r\nCI is failing due to datetime units not being set for csv reader\r\n\r\n### What I Did\r\n\r\nSee the PR here that was failing\r\nhttps://github.com/conda-forge/act-atmos-feedstock/pull/63\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\nimport pathlib\n\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n ignore_index : bool\n Keyword for pandas concat function. If True, do not use the index\n values along the concatenation axis. The resulting axis will be labeled\n 0, \u2026, n - 1. This is useful if you are concatenating datasets where the\n concatenation axis does not have meaningful indexing information. Note\n the index values on the other axes are still respected in the join.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n ds : xarray.Dataset\n ACT Xarray dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n\n ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n\n # Convert to string if filename is a pathlib or not a list\n if isinstance(filename, (pathlib.PurePath, str)):\n filename = [str(filename)]\n\n if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):\n filename = [str(ii) for ii in filename]\n\n # Read data using pandas read_csv one file at a time and append to\n # list. 
Then concatinate the list into one pandas dataframe.\n li = []\n for fl in filename:\n df = pd.read_csv(\n fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs\n )\n li.append(df)\n\n if len(li) == 1:\n df = li[0]\n else:\n df = pd.concat(li, axis=0, ignore_index=ignore_index)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n df.date_time = df.date_time.astype('datetime64')\n df.time = df.date_time\n df = df.set_index('time')\n\n # Convert to xarray DataSet\n ds = df.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(ds)\n if is_arm_file_flag == 0:\n\n ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]\n ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return ds\n", "path": "act/io/csvfiles.py"}], "after_files": [{"content": "\"\"\"\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\nimport pathlib\n\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n ignore_index : bool\n Keyword for pandas concat function. If True, do not use the index\n values along the concatenation axis. The resulting axis will be labeled\n 0, \u2026, n - 1. This is useful if you are concatenating datasets where the\n concatenation axis does not have meaningful indexing information. Note\n the index values on the other axes are still respected in the join.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n ds : xarray.Dataset\n ACT Xarray dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n\n ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n\n # Convert to string if filename is a pathlib or not a list\n if isinstance(filename, (pathlib.PurePath, str)):\n filename = [str(filename)]\n\n if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):\n filename = [str(ii) for ii in filename]\n\n # Read data using pandas read_csv one file at a time and append to\n # list. 
Then concatinate the list into one pandas dataframe.\n li = []\n for fl in filename:\n df = pd.read_csv(\n fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs\n )\n li.append(df)\n\n if len(li) == 1:\n df = li[0]\n else:\n df = pd.concat(li, axis=0, ignore_index=ignore_index)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n df.date_time = df.date_time.astype('datetime64[ns]')\n df.time = df.date_time\n df = df.set_index('time')\n\n # Convert to xarray DataSet\n ds = df.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(ds)\n if is_arm_file_flag == 0:\n\n ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]\n ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return ds\n", "path": "act/io/csvfiles.py"}]} | 1,362 | 119 |
gh_patches_debug_14245 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1067 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SciPy in requirements in README but not in install_requires
Hey!
I'm wondering why SciPy is listed as a requirement in README but not in setup.py's install_requires argument.
Cheers,
Mike
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/MultiPlotWidget.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 ## Add path to library (just for examples; you do not need this)
4 import initExample
5
6
7 from scipy import random
8 from numpy import linspace
9 from pyqtgraph.Qt import QtGui, QtCore
10 import pyqtgraph as pg
11 from pyqtgraph import MultiPlotWidget
12 try:
13 from pyqtgraph.metaarray import *
14 except:
15 print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
16 exit()
17
18 app = QtGui.QApplication([])
19 mw = QtGui.QMainWindow()
20 mw.resize(800,800)
21 pw = MultiPlotWidget()
22 mw.setCentralWidget(pw)
23 mw.show()
24
25 data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
26 ma = MetaArray(data, info=[
27 {'name': 'Signal', 'cols': [
28 {'name': 'Col1', 'units': 'V'},
29 {'name': 'Col2', 'units': 'A'},
30 {'name': 'Col3'},
31 ]},
32 {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
33 ])
34 pw.plot(ma)
35
36 ## Start Qt event loop unless running in interactive mode.
37 if __name__ == '__main__':
38 import sys
39 if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
40 QtGui.QApplication.instance().exec_()
41
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/MultiPlotWidget.py b/examples/MultiPlotWidget.py
--- a/examples/MultiPlotWidget.py
+++ b/examples/MultiPlotWidget.py
@@ -3,8 +3,7 @@
## Add path to library (just for examples; you do not need this)
import initExample
-
-from scipy import random
+import numpy as np
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
@@ -22,7 +21,7 @@
mw.setCentralWidget(pw)
mw.show()
-data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
+data = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
| {"golden_diff": "diff --git a/examples/MultiPlotWidget.py b/examples/MultiPlotWidget.py\n--- a/examples/MultiPlotWidget.py\n+++ b/examples/MultiPlotWidget.py\n@@ -3,8 +3,7 @@\n ## Add path to library (just for examples; you do not need this)\n import initExample\n \n-\n-from scipy import random\n+import numpy as np\n from numpy import linspace\n from pyqtgraph.Qt import QtGui, QtCore\n import pyqtgraph as pg\n@@ -22,7 +21,7 @@\n mw.setCentralWidget(pw)\n mw.show()\n \n-data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\n+data = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\n ma = MetaArray(data, info=[\n {'name': 'Signal', 'cols': [\n {'name': 'Col1', 'units': 'V'},\n", "issue": "SciPy in requirements in README but not in install_requires\nHey!\r\nI'm wondering why SciPy is listed as a requirement in README but not in setup.py install_require argument.\r\n\r\nCheers,\r\nMike\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\n\nfrom scipy import random\nfrom numpy import linspace\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nfrom pyqtgraph import MultiPlotWidget\ntry:\n from pyqtgraph.metaarray import *\nexcept:\n print(\"MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)\")\n exit()\n\napp = QtGui.QApplication([])\nmw = QtGui.QMainWindow()\nmw.resize(800,800)\npw = MultiPlotWidget()\nmw.setCentralWidget(pw)\nmw.show()\n\ndata = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\nma = MetaArray(data, info=[\n {'name': 'Signal', 'cols': [\n {'name': 'Col1', 'units': 'V'}, \n {'name': 'Col2', 'units': 'A'}, \n {'name': 'Col3'},\n ]}, \n {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}\n ])\npw.plot(ma)\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n", "path": "examples/MultiPlotWidget.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\nimport numpy as np\nfrom numpy import linspace\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nfrom pyqtgraph import MultiPlotWidget\ntry:\n from pyqtgraph.metaarray import *\nexcept:\n print(\"MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)\")\n exit()\n\napp = QtGui.QApplication([])\nmw = QtGui.QMainWindow()\nmw.resize(800,800)\npw = MultiPlotWidget()\nmw.setCentralWidget(pw)\nmw.show()\n\ndata = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\nma = MetaArray(data, info=[\n {'name': 'Signal', 'cols': [\n {'name': 'Col1', 'units': 'V'}, \n {'name': 'Col2', 'units': 'A'}, \n {'name': 'Col3'},\n ]}, \n {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}\n ])\npw.plot(ma)\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n", "path": "examples/MultiPlotWidget.py"}]} | 710 | 220 |
gh_patches_debug_29127 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2384 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The API does not always return the same info for a member
> Another thing: when updating a member, you can specify two fields that are not returned by the standard GET: `hover_or_click` and `show_sign`. Is that normal?
Source:[Kje](http://zestedesavoir.com/forums/sujet/1365/zep-17-elaboration-de-lapi-des-membres/?page=18#p45095)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/member/api/serializers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from rest_framework import serializers
4
5 from zds.member.commons import ProfileUsernameValidator, ProfileEmailValidator, \
6 ProfileCreate
7 from zds.member.models import Profile
8
9
10 class ProfileListSerializer(serializers.ModelSerializer):
11 """
12 Serializers of a user object.
13 """
14
15 username = serializers.CharField(source='user.username')
16 is_active = serializers.BooleanField(source='user.is_active')
17 date_joined = serializers.DateTimeField(source='user.date_joined')
18
19 class Meta:
20 model = Profile
21 fields = ('pk', 'username', 'is_active', 'date_joined')
22
23
24 class ProfileCreateSerializer(serializers.ModelSerializer, ProfileCreate, ProfileUsernameValidator,
25 ProfileEmailValidator):
26 """
27 Serializers of a user object to create one.
28 """
29
30 username = serializers.CharField(source='user.username')
31 email = serializers.EmailField(source='user.email')
32 password = serializers.CharField(source='user.password')
33
34 class Meta:
35 model = Profile
36 fields = ('pk', 'username', 'email', 'password')
37 write_only_fields = ('password')
38
39 def create(self, validated_data):
40 profile = self.create_profile(validated_data.get('user'))
41 self.save_profile(profile)
42 return profile
43
44 def throw_error(self, key=None, message=None):
45 raise serializers.ValidationError(message)
46
47
48 class ProfileDetailSerializer(serializers.ModelSerializer):
49 """
50 Serializers of a profile object.
51 """
52
53 username = serializers.CharField(source='user.username')
54 email = serializers.EmailField(source='user.email')
55 is_active = serializers.BooleanField(source='user.is_active')
56 date_joined = serializers.DateTimeField(source='user.date_joined')
57
58 class Meta:
59 model = Profile
60 fields = ('pk', 'username', 'show_email', 'email', 'is_active',
61 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',
62 'last_visit', 'date_joined')
63
64 def __init__(self, *args, **kwargs):
65 """
66 Create the serializer with or without email field, depending on the show_email argument.
67 """
68 show_email = kwargs.pop('show_email', False)
69 is_authenticated = kwargs.pop('is_authenticated', False)
70
71 super(ProfileDetailSerializer, self).__init__(*args, **kwargs)
72
73 if not show_email or not is_authenticated:
74 # Drop email field.
75 self.fields.pop('email')
76
77
78 class ProfileValidatorSerializer(serializers.ModelSerializer, ProfileUsernameValidator, ProfileEmailValidator):
79 """
80 Serializers of a profile object used to update a member.
81 """
82
83 username = serializers.CharField(source='user.username', required=False, allow_blank=True)
84 email = serializers.EmailField(source='user.email', required=False, allow_blank=True)
85
86 class Meta:
87 model = Profile
88 fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',
89 'sign', 'show_email', 'show_sign', 'hover_or_click',
90 'email_for_answer')
91
92 def update(self, instance, validated_data):
93 """
94 Update and return an existing `Profile` instance, given the validated data.
95 """
96 instance.user.username = validated_data.get('user').get('username',
97 instance.user.username) or instance.user.username
98 instance.user.email = validated_data.get('user').get('email', instance.user.email) or instance.user.email
99 instance.site = validated_data.get('site', instance.site) or instance.site
100 instance.avatar_url = validated_data.get('avatar_url', instance.avatar_url) or instance.avatar_url
101 instance.biography = validated_data.get('biography', instance.biography) or instance.biography
102 instance.sign = validated_data.get('sign', instance.sign) or instance.sign
103 instance.show_email = validated_data.get('show_email', instance.show_email) or instance.show_email
104 instance.show_sign = validated_data.get('show_sign', instance.show_sign) or instance.show_sign
105 instance.hover_or_click = validated_data.get('hover_or_click',
106 instance.hover_or_click) or instance.hover_or_click
107 instance.email_for_answer = validated_data.get('email_for_answer',
108 instance.email_for_answer) or instance.email_for_answer
109 instance.user.save()
110 instance.save()
111 return instance
112
113 def throw_error(self, key=None, message=None):
114 raise serializers.ValidationError(message)
115
116
117 class ProfileSanctionSerializer(serializers.ModelSerializer):
118 """
119 Serializers of a profile object to set the user in reading only access.
120 """
121
122 username = serializers.ReadOnlyField(source='user.username')
123 email = serializers.ReadOnlyField(source='user.email')
124
125 class Meta:
126 model = Profile
127 fields = ('pk', 'username', 'email', 'can_write', 'end_ban_write', 'can_read', 'end_ban_read')
128 read_only_fields = ('can_write', 'end_ban_write', 'can_read', 'end_ban_read')
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/member/api/serializers.py b/zds/member/api/serializers.py
--- a/zds/member/api/serializers.py
+++ b/zds/member/api/serializers.py
@@ -57,9 +57,9 @@
class Meta:
model = Profile
- fields = ('pk', 'username', 'show_email', 'email', 'is_active',
- 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',
- 'last_visit', 'date_joined')
+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',
+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',
+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')
def __init__(self, *args, **kwargs):
"""
@@ -82,12 +82,15 @@
username = serializers.CharField(source='user.username', required=False, allow_blank=True)
email = serializers.EmailField(source='user.email', required=False, allow_blank=True)
+ is_active = serializers.BooleanField(source='user.is_active', required=False)
+ date_joined = serializers.DateTimeField(source='user.date_joined', required=False)
class Meta:
model = Profile
- fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',
- 'sign', 'show_email', 'show_sign', 'hover_or_click',
- 'email_for_answer')
+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',
+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',
+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')
+ read_only_fields = ('is_active', 'date_joined', 'last_visit',)
def update(self, instance, validated_data):
"""
| {"golden_diff": "diff --git a/zds/member/api/serializers.py b/zds/member/api/serializers.py\n--- a/zds/member/api/serializers.py\n+++ b/zds/member/api/serializers.py\n@@ -57,9 +57,9 @@\n \n class Meta:\n model = Profile\n- fields = ('pk', 'username', 'show_email', 'email', 'is_active',\n- 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',\n- 'last_visit', 'date_joined')\n+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -82,12 +82,15 @@\n \n username = serializers.CharField(source='user.username', required=False, allow_blank=True)\n email = serializers.EmailField(source='user.email', required=False, allow_blank=True)\n+ is_active = serializers.BooleanField(source='user.is_active', required=False)\n+ date_joined = serializers.DateTimeField(source='user.date_joined', required=False)\n \n class Meta:\n model = Profile\n- fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',\n- 'sign', 'show_email', 'show_sign', 'hover_or_click',\n- 'email_for_answer')\n+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n+ read_only_fields = ('is_active', 'date_joined', 'last_visit',)\n \n def update(self, instance, validated_data):\n \"\"\"\n", "issue": "L'API ne retourne pas toujours les m\u00eames infos pour un membre\n> Un autre truc, quand on met un jour un membre on peut sp\u00e9cifier deux champs qui ne sont pas fournit par le get classique : `hover_or_click` et `show_sign`. Est ce normal ?\n\nSource:[Kje](http://zestedesavoir.com/forums/sujet/1365/zep-17-elaboration-de-lapi-des-membres/?page=18#p45095)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom zds.member.commons import ProfileUsernameValidator, ProfileEmailValidator, \\\n ProfileCreate\nfrom zds.member.models import Profile\n\n\nclass ProfileListSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a user object.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n is_active = serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'is_active', 'date_joined')\n\n\nclass ProfileCreateSerializer(serializers.ModelSerializer, ProfileCreate, ProfileUsernameValidator,\n ProfileEmailValidator):\n \"\"\"\n Serializers of a user object to create one.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n password = serializers.CharField(source='user.password')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'password')\n write_only_fields = ('password')\n\n def create(self, validated_data):\n profile = self.create_profile(validated_data.get('user'))\n self.save_profile(profile)\n return profile\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n is_active = 
serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'show_email', 'email', 'is_active',\n 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',\n 'last_visit', 'date_joined')\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create the serializer with or without email field, depending on the show_email argument.\n \"\"\"\n show_email = kwargs.pop('show_email', False)\n is_authenticated = kwargs.pop('is_authenticated', False)\n\n super(ProfileDetailSerializer, self).__init__(*args, **kwargs)\n\n if not show_email or not is_authenticated:\n # Drop email field.\n self.fields.pop('email')\n\n\nclass ProfileValidatorSerializer(serializers.ModelSerializer, ProfileUsernameValidator, ProfileEmailValidator):\n \"\"\"\n Serializers of a profile object used to update a member.\n \"\"\"\n\n username = serializers.CharField(source='user.username', required=False, allow_blank=True)\n email = serializers.EmailField(source='user.email', required=False, allow_blank=True)\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',\n 'sign', 'show_email', 'show_sign', 'hover_or_click',\n 'email_for_answer')\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Profile` instance, given the validated data.\n \"\"\"\n instance.user.username = validated_data.get('user').get('username',\n instance.user.username) or instance.user.username\n instance.user.email = validated_data.get('user').get('email', instance.user.email) or instance.user.email\n instance.site = validated_data.get('site', instance.site) or instance.site\n instance.avatar_url = validated_data.get('avatar_url', instance.avatar_url) or instance.avatar_url\n instance.biography = validated_data.get('biography', instance.biography) or instance.biography\n instance.sign = validated_data.get('sign', instance.sign) or instance.sign\n instance.show_email = validated_data.get('show_email', instance.show_email) or instance.show_email\n instance.show_sign = validated_data.get('show_sign', instance.show_sign) or instance.show_sign\n instance.hover_or_click = validated_data.get('hover_or_click',\n instance.hover_or_click) or instance.hover_or_click\n instance.email_for_answer = validated_data.get('email_for_answer',\n instance.email_for_answer) or instance.email_for_answer\n instance.user.save()\n instance.save()\n return instance\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileSanctionSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object to set the user in reading only access.\n \"\"\"\n\n username = serializers.ReadOnlyField(source='user.username')\n email = serializers.ReadOnlyField(source='user.email')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n read_only_fields = ('can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n", "path": "zds/member/api/serializers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom zds.member.commons import ProfileUsernameValidator, ProfileEmailValidator, \\\n ProfileCreate\nfrom zds.member.models import Profile\n\n\nclass ProfileListSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a user object.\n \"\"\"\n\n username = 
serializers.CharField(source='user.username')\n is_active = serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'is_active', 'date_joined')\n\n\nclass ProfileCreateSerializer(serializers.ModelSerializer, ProfileCreate, ProfileUsernameValidator,\n ProfileEmailValidator):\n \"\"\"\n Serializers of a user object to create one.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n password = serializers.CharField(source='user.password')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'password')\n write_only_fields = ('password')\n\n def create(self, validated_data):\n profile = self.create_profile(validated_data.get('user'))\n self.save_profile(profile)\n return profile\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n is_active = serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create the serializer with or without email field, depending on the show_email argument.\n \"\"\"\n show_email = kwargs.pop('show_email', False)\n is_authenticated = kwargs.pop('is_authenticated', False)\n\n super(ProfileDetailSerializer, self).__init__(*args, **kwargs)\n\n if not show_email or not is_authenticated:\n # Drop email field.\n self.fields.pop('email')\n\n\nclass ProfileValidatorSerializer(serializers.ModelSerializer, ProfileUsernameValidator, ProfileEmailValidator):\n \"\"\"\n Serializers of a profile object used to update a member.\n \"\"\"\n\n username = serializers.CharField(source='user.username', required=False, allow_blank=True)\n email = serializers.EmailField(source='user.email', required=False, allow_blank=True)\n is_active = serializers.BooleanField(source='user.is_active', required=False)\n date_joined = serializers.DateTimeField(source='user.date_joined', required=False)\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n read_only_fields = ('is_active', 'date_joined', 'last_visit',)\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Profile` instance, given the validated data.\n \"\"\"\n instance.user.username = validated_data.get('user').get('username',\n instance.user.username) or instance.user.username\n instance.user.email = validated_data.get('user').get('email', instance.user.email) or instance.user.email\n instance.site = validated_data.get('site', instance.site) or instance.site\n instance.avatar_url = validated_data.get('avatar_url', instance.avatar_url) or instance.avatar_url\n instance.biography = validated_data.get('biography', instance.biography) or instance.biography\n instance.sign = validated_data.get('sign', instance.sign) 
or instance.sign\n instance.show_email = validated_data.get('show_email', instance.show_email) or instance.show_email\n instance.show_sign = validated_data.get('show_sign', instance.show_sign) or instance.show_sign\n instance.hover_or_click = validated_data.get('hover_or_click',\n instance.hover_or_click) or instance.hover_or_click\n instance.email_for_answer = validated_data.get('email_for_answer',\n instance.email_for_answer) or instance.email_for_answer\n instance.user.save()\n instance.save()\n return instance\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileSanctionSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object to set the user in reading only access.\n \"\"\"\n\n username = serializers.ReadOnlyField(source='user.username')\n email = serializers.ReadOnlyField(source='user.email')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n read_only_fields = ('can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n", "path": "zds/member/api/serializers.py"}]} | 1,691 | 434 |
gh_patches_debug_27067 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow 0 as value for funding amount in partnerships
It should be possible to fill in 0 as a funding amount in the project editor, and then publish a project. This is based on Plan Finland feedback:
"Are you able to give us an estimate on when the suggestions we made to Geert could be published (the changes to the results section and possibility for 0€ budget project)."
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/publishing_status.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8 from django.core.exceptions import ValidationError
9 from django.core.mail import send_mail
10 from django.db import models
11 from django.db.models.signals import post_save
12 from django.dispatch import receiver
13 from django.utils.translation import ugettext_lazy as _
14 from .partnership import Partnership
15
16 from ..fields import ValidXMLCharField
17
18
19 class PublishingStatus(models.Model):
20 """Keep track of publishing status."""
21 STATUS_PUBLISHED = 'published'
22 STATUS_UNPUBLISHED = 'unpublished'
23 PUBLISHING_STATUS = (
24 (STATUS_UNPUBLISHED, _(u'Unpublished')),
25 (STATUS_PUBLISHED, _(u'Published')),
26 )
27
28 project = models.OneToOneField('Project',)
29 status = ValidXMLCharField(max_length=30,
30 choices=PUBLISHING_STATUS,
31 db_index=True, default=STATUS_UNPUBLISHED)
32
33 def clean(self):
34 """Projects can only be published, when several checks have been performed."""
35 if self.status == 'published':
36 validation_errors = []
37
38 if not self.project.title:
39 validation_errors.append(
40 ValidationError(_('Project needs to have a title.'),
41 code='title')
42 )
43
44 if not self.project.subtitle:
45 validation_errors.append(
46 ValidationError(_('Project needs to have a subtitle.'),
47 code='subtitle')
48 )
49
50 if self.project.iati_status == '6':
51 validation_errors.append(
52 ValidationError(_('Project needs to have non-suspended status.'),
53 code='status')
54 )
55
56 if not (self.project.date_start_planned or self.project.date_start_actual):
57 validation_errors.append(
58 ValidationError(
59 _('Project needs to have the planned or actual start date field filled '
60 'in.'), code='start_date')
61 )
62
63 if not self.project.current_image:
64 validation_errors.append(
65 ValidationError(_('Project needs to have a photo.'),
66 code='current_image')
67 )
68
69 if not self.project.partnerships.filter(
70 organisation__can_create_projects__exact=True).exists():
71 validation_errors.append(
72 ValidationError(
73 _('Project has no partner that is allowed to publish it.'),
74 code='partners'
75 )
76 )
77
78 if not self.project.partnerships.filter(
79 iati_organisation_role__in=[Partnership.IATI_FUNDING_PARTNER,
80 Partnership.IATI_IMPLEMENTING_PARTNER,
81 Partnership.IATI_ACCOUNTABLE_PARTNER]
82 ).exists():
83 validation_errors.append(
84 ValidationError(
85 _('Project needs to have at least one funding, implementing or accountable '
86 'partner.'),
87 code='partners'
88 )
89 )
90 else:
91 for funding_partner in self.project.partnerships.filter(
92 iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):
93 if not funding_partner.funding_amount:
94 validation_errors.append(
95 ValidationError(_('All funding partners should have a funding amount.'),
96 code='partners'
97 )
98 )
99 break
100
101 if not self.project.project_plan_summary:
102 validation_errors.append(
103 ValidationError(_('Project needs to have the project plan summary filled in.'),
104 code='summary')
105 )
106
107 if not self.project.goals_overview:
108 validation_errors.append(
109 ValidationError(_('Project needs to have the goals overview field filled in.'),
110 code='goals_overview')
111 )
112
113 if not self.project.locations.all():
114 validation_errors.append(
115 ValidationError(_('Project needs to have at least one location.'),
116 code='location')
117 )
118 else:
119 for location in self.project.locations.all():
120 if not (location.latitude and location.longitude):
121 validation_errors.append(
122 ValidationError(
123 _('All locations need to have a latitude and longitude specified.'),
124 code='location')
125 )
126 break
127
128 if not self.project.budget_items.all():
129 validation_errors.append(
130 ValidationError(_('Project needs to have at least one budget item.'),
131 code='budget_item')
132 )
133 elif not self.project.budget_items.filter(amount__gt=0).exists():
134 validation_errors.append(
135 ValidationError(
136 _('Project needs to have at least one budget item with an amount.'),
137 code='budget_item'
138 )
139 )
140
141 if validation_errors:
142 raise ValidationError(validation_errors)
143
144 class Meta:
145 app_label = 'rsr'
146 verbose_name = _(u'publishing status')
147 verbose_name_plural = _(u'publishing statuses')
148 ordering = ('-status', 'project')
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py
--- a/akvo/rsr/models/publishing_status.py
+++ b/akvo/rsr/models/publishing_status.py
@@ -90,7 +90,7 @@
else:
for funding_partner in self.project.partnerships.filter(
iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):
- if not funding_partner.funding_amount:
+ if not funding_partner.funding_amount and not funding_partner.funding_amount == 0:
validation_errors.append(
ValidationError(_('All funding partners should have a funding amount.'),
code='partners'
@@ -130,7 +130,7 @@
ValidationError(_('Project needs to have at least one budget item.'),
code='budget_item')
)
- elif not self.project.budget_items.filter(amount__gt=0).exists():
+ elif not self.project.budget_items.filter(amount__gte=0).exists():
validation_errors.append(
ValidationError(
_('Project needs to have at least one budget item with an amount.'),
| {"golden_diff": "diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py\n--- a/akvo/rsr/models/publishing_status.py\n+++ b/akvo/rsr/models/publishing_status.py\n@@ -90,7 +90,7 @@\n else:\n for funding_partner in self.project.partnerships.filter(\n iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):\n- if not funding_partner.funding_amount:\n+ if not funding_partner.funding_amount and not funding_partner.funding_amount == 0:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n@@ -130,7 +130,7 @@\n ValidationError(_('Project needs to have at least one budget item.'),\n code='budget_item')\n )\n- elif not self.project.budget_items.filter(amount__gt=0).exists():\n+ elif not self.project.budget_items.filter(amount__gte=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n", "issue": "Allow 0 as value for funding amount in partnerships\nIt should be possible to fill in 0 as a funding amount in the project editor, and then publish a project. This is based on Plan Finland feedback:\n\n\"Are you able to give us an estimate on when the suggestions we made to Geert could be published (the changes to the results section and possibility for 0\u20ac budget project).\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import send_mail\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom .partnership import Partnership\n\nfrom ..fields import ValidXMLCharField\n\n\nclass PublishingStatus(models.Model):\n \"\"\"Keep track of publishing status.\"\"\"\n STATUS_PUBLISHED = 'published'\n STATUS_UNPUBLISHED = 'unpublished'\n PUBLISHING_STATUS = (\n (STATUS_UNPUBLISHED, _(u'Unpublished')),\n (STATUS_PUBLISHED, _(u'Published')),\n )\n\n project = models.OneToOneField('Project',)\n status = ValidXMLCharField(max_length=30,\n choices=PUBLISHING_STATUS,\n db_index=True, default=STATUS_UNPUBLISHED)\n\n def clean(self):\n \"\"\"Projects can only be published, when several checks have been performed.\"\"\"\n if self.status == 'published':\n validation_errors = []\n\n if not self.project.title:\n validation_errors.append(\n ValidationError(_('Project needs to have a title.'),\n code='title')\n )\n\n if not self.project.subtitle:\n validation_errors.append(\n ValidationError(_('Project needs to have a subtitle.'),\n code='subtitle')\n )\n\n if self.project.iati_status == '6':\n validation_errors.append(\n ValidationError(_('Project needs to have non-suspended status.'),\n code='status')\n )\n\n if not (self.project.date_start_planned or self.project.date_start_actual):\n validation_errors.append(\n ValidationError(\n _('Project needs to have the planned or actual start date field filled '\n 'in.'), code='start_date')\n )\n\n if not self.project.current_image:\n validation_errors.append(\n ValidationError(_('Project needs to have a photo.'),\n code='current_image')\n )\n\n if not self.project.partnerships.filter(\n 
organisation__can_create_projects__exact=True).exists():\n validation_errors.append(\n ValidationError(\n _('Project has no partner that is allowed to publish it.'),\n code='partners'\n )\n )\n\n if not self.project.partnerships.filter(\n iati_organisation_role__in=[Partnership.IATI_FUNDING_PARTNER,\n Partnership.IATI_IMPLEMENTING_PARTNER,\n Partnership.IATI_ACCOUNTABLE_PARTNER]\n ).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one funding, implementing or accountable '\n 'partner.'),\n code='partners'\n )\n )\n else:\n for funding_partner in self.project.partnerships.filter(\n iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):\n if not funding_partner.funding_amount:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n )\n )\n break\n\n if not self.project.project_plan_summary:\n validation_errors.append(\n ValidationError(_('Project needs to have the project plan summary filled in.'),\n code='summary')\n )\n\n if not self.project.goals_overview:\n validation_errors.append(\n ValidationError(_('Project needs to have the goals overview field filled in.'),\n code='goals_overview')\n )\n\n if not self.project.locations.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one location.'),\n code='location')\n )\n else:\n for location in self.project.locations.all():\n if not (location.latitude and location.longitude):\n validation_errors.append(\n ValidationError(\n _('All locations need to have a latitude and longitude specified.'),\n code='location')\n )\n break\n\n if not self.project.budget_items.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one budget item.'),\n code='budget_item')\n )\n elif not self.project.budget_items.filter(amount__gt=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n code='budget_item'\n )\n )\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'publishing status')\n verbose_name_plural = _(u'publishing statuses')\n ordering = ('-status', 'project')\n", "path": "akvo/rsr/models/publishing_status.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import send_mail\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom .partnership import Partnership\n\nfrom ..fields import ValidXMLCharField\n\n\nclass PublishingStatus(models.Model):\n \"\"\"Keep track of publishing status.\"\"\"\n STATUS_PUBLISHED = 'published'\n STATUS_UNPUBLISHED = 'unpublished'\n PUBLISHING_STATUS = (\n (STATUS_UNPUBLISHED, _(u'Unpublished')),\n (STATUS_PUBLISHED, _(u'Published')),\n )\n\n project = models.OneToOneField('Project',)\n status = ValidXMLCharField(max_length=30,\n choices=PUBLISHING_STATUS,\n db_index=True, default=STATUS_UNPUBLISHED)\n\n def clean(self):\n \"\"\"Projects can only be published, when several checks have been performed.\"\"\"\n 
if self.status == 'published':\n validation_errors = []\n\n if not self.project.title:\n validation_errors.append(\n ValidationError(_('Project needs to have a title.'),\n code='title')\n )\n\n if not self.project.subtitle:\n validation_errors.append(\n ValidationError(_('Project needs to have a subtitle.'),\n code='subtitle')\n )\n\n if self.project.iati_status == '6':\n validation_errors.append(\n ValidationError(_('Project needs to have non-suspended status.'),\n code='status')\n )\n\n if not (self.project.date_start_planned or self.project.date_start_actual):\n validation_errors.append(\n ValidationError(\n _('Project needs to have the planned or actual start date field filled '\n 'in.'), code='start_date')\n )\n\n if not self.project.current_image:\n validation_errors.append(\n ValidationError(_('Project needs to have a photo.'),\n code='current_image')\n )\n\n if not self.project.partnerships.filter(\n organisation__can_create_projects__exact=True).exists():\n validation_errors.append(\n ValidationError(\n _('Project has no partner that is allowed to publish it.'),\n code='partners'\n )\n )\n\n if not self.project.partnerships.filter(\n iati_organisation_role__in=[Partnership.IATI_FUNDING_PARTNER,\n Partnership.IATI_IMPLEMENTING_PARTNER,\n Partnership.IATI_ACCOUNTABLE_PARTNER]\n ).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one funding, implementing or accountable '\n 'partner.'),\n code='partners'\n )\n )\n else:\n for funding_partner in self.project.partnerships.filter(\n iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):\n if not funding_partner.funding_amount and not funding_partner.funding_amount == 0:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n )\n )\n break\n\n if not self.project.project_plan_summary:\n validation_errors.append(\n ValidationError(_('Project needs to have the project plan summary filled in.'),\n code='summary')\n )\n\n if not self.project.goals_overview:\n validation_errors.append(\n ValidationError(_('Project needs to have the goals overview field filled in.'),\n code='goals_overview')\n )\n\n if not self.project.locations.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one location.'),\n code='location')\n )\n else:\n for location in self.project.locations.all():\n if not (location.latitude and location.longitude):\n validation_errors.append(\n ValidationError(\n _('All locations need to have a latitude and longitude specified.'),\n code='location')\n )\n break\n\n if not self.project.budget_items.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one budget item.'),\n code='budget_item')\n )\n elif not self.project.budget_items.filter(amount__gte=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n code='budget_item'\n )\n )\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'publishing status')\n verbose_name_plural = _(u'publishing statuses')\n ordering = ('-status', 'project')\n", "path": "akvo/rsr/models/publishing_status.py"}]} | 1,674 | 248 |
gh_patches_debug_6638 | rasdani/github-patches | git_diff | zulip__zulip-28016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Onboarding hotspots are misplaced
I think our grid rewrites of the sidebars have resulted in the onboarding hotspots being somewhat misplaced:

(The `offset_x` and `offset_y` values may need updating).
I'm not entirely sure where the best place for these is. The main one that seems very wrong is the compose box one.
That said, we should aim to spend pretty minimal time on this system because we plan to rip it out in favor of a totally different onboarding system.
See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html for notes on how to test using the `ALWAYS_SEND_ALL_HOTSPOTS` setting as shown in this screenshot. (Usually, they're shown only one at a time in sequence).
@sayamsamal can you pick this one up?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/hotspots.py`
Content:
```
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from dataclasses import dataclass
4 from typing import Dict, List, Optional, Union
5
6 from django.conf import settings
7 from django.utils.translation import gettext_lazy
8 from django_stubs_ext import StrPromise
9
10 from zerver.models import UserHotspot, UserProfile
11
12
13 @dataclass
14 class Hotspot:
15 name: str
16 title: Optional[StrPromise]
17 description: Optional[StrPromise]
18 has_trigger: bool = False
19
20 def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:
21 return {
22 "name": self.name,
23 "title": str(self.title),
24 "description": str(self.description),
25 "delay": delay,
26 "has_trigger": self.has_trigger,
27 }
28
29
30 INTRO_HOTSPOTS: List[Hotspot] = [
31 Hotspot(
32 name="intro_streams",
33 title=gettext_lazy("Catch up on a stream"),
34 description=gettext_lazy(
35 "Messages sent to a stream are seen by everyone subscribed "
36 "to that stream. Try clicking on one of the stream links below."
37 ),
38 ),
39 Hotspot(
40 name="intro_topics",
41 title=gettext_lazy("Topics"),
42 description=gettext_lazy(
43 "Every message has a topic. Topics keep conversations "
44 "easy to follow, and make it easy to reply to conversations that start "
45 "while you are offline."
46 ),
47 ),
48 Hotspot(
49 name="intro_gear",
50 title=gettext_lazy("Settings"),
51 description=gettext_lazy("Go to Settings to configure your notifications and preferences."),
52 ),
53 Hotspot(
54 name="intro_compose",
55 title=gettext_lazy("Compose"),
56 description=gettext_lazy(
57 "Click here to start a new conversation. Pick a topic "
58 "(2-3 words is best), and give it a go!"
59 ),
60 ),
61 ]
62
63
64 NON_INTRO_HOTSPOTS: List[Hotspot] = []
65
66 # We would most likely implement new hotspots in the future that aren't
67 # a part of the initial tutorial. To that end, classifying them into
68 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
69 ALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]
70
71
72 def get_next_hotspots(user: UserProfile) -> List[Dict[str, Union[str, float, bool]]]:
73 # For manual testing, it can be convenient to set
74 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
75 # make it easy to click on all of the hotspots.
76 #
77 # Since this is just for development purposes, it's convenient for us to send
78 # all the hotspots rather than any specific category.
79 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
80 return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]
81
82 # If a Zulip server has disabled the tutorial, never send hotspots.
83 if not settings.TUTORIAL_ENABLED:
84 return []
85
86 seen_hotspots = frozenset(
87 UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)
88 )
89
90 hotspots = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]
91
92 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
93 return hotspots
94
95 for hotspot in INTRO_HOTSPOTS:
96 if hotspot.name in seen_hotspots:
97 continue
98
99 hotspots.append(hotspot.to_dict(delay=0.5))
100 return hotspots
101
102 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
103 user.save(update_fields=["tutorial_status"])
104 return hotspots
105
106
107 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
108 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
109 UserHotspot.objects.create(
110 user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp
111 )
112
113 target_profile.tutorial_status = source_profile.tutorial_status
114 target_profile.onboarding_steps = source_profile.onboarding_steps
115 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -46,6 +46,9 @@
),
),
Hotspot(
+ # In theory, this should be renamed to intro_personal, since
+ # it's no longer attached to the gear menu, but renaming these
+ # requires a migration that is not worth doing at this time.
name="intro_gear",
title=gettext_lazy("Settings"),
description=gettext_lazy("Go to Settings to configure your notifications and preferences."),
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -46,6 +46,9 @@\n ),\n ),\n Hotspot(\n+ # In theory, this should be renamed to intro_personal, since\n+ # it's no longer attached to the gear menu, but renaming these\n+ # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n", "issue": "Onboarding hotspots are misplaced\nI think our grid rewrites of the sidebars have resulted in the onboarding hotspots being somewhat misplaced:\r\n\r\n\r\n\r\n(The `offset_x` and `offset_y` values may need updating).\r\n\r\nI'm not entirely sure where the best place for these are. The main one that seems very wrong is the compose box one.\r\n\r\nThat said, we should aim to spend pretty minimal time on this system because we plan to rip it out in favor of a totally different onboarding system.\r\n\r\nSee https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html for notes on how to test using the `ALWAYS_SEND_ALL_HOTSPOTS` setting as shown in this screenshot. (Usually, they're shown only one at a time in sequence).\r\n\r\n@sayamsamal can you pick this one up?\r\n\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import UserHotspot, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. 
To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, Union[str, float, bool]]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n\n hotspots = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return hotspots\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_hotspots:\n continue\n\n hotspots.append(hotspot.to_dict(delay=0.5))\n return hotspots\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return hotspots\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}], "after_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import UserHotspot, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. 
Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n # In theory, this should be renamed to intro_personal, since\n # it's no longer attached to the gear menu, but renaming these\n # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, Union[str, float, bool]]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n\n hotspots = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return hotspots\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_hotspots:\n continue\n\n hotspots.append(hotspot.to_dict(delay=0.5))\n return hotspots\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return hotspots\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 1,653 | 139 |
gh_patches_debug_52787 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] all: "Access is denied" in os.rename() on Windows
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **almost all packages affected**
* Operating System+version: **Windows 10**
* Compiler+version: **MSVC 16**
* Conan version: **conan 1.35.2**
* Python version: **Python 3.8.7**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os_build=Windows
os=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
compiler.runtime=MD
build_type=Release
```
### Steps to reproduce (Include if Applicable)
This is a known issue; the fix was provided by https://github.com/conan-io/conan/pull/6774.
However, most recipes still use `os.rename()` rather than `tools.rename()`.
### Log
```
b2/4.2.0: Configuring sources in C:\Users\xxx\.conan\data\b2\4.2.0\_\_\source
ERROR: b2/4.2.0: Error in source() method, line 58
os.rename(extracted_dir, "source")
PermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'
```
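For reference, a minimal sketch of the usual remedy (assuming Conan >= 1.33, where `tools.get()` accepts `destination` and `strip_root`); extracting straight into place removes the `os.rename()` call that Windows blocks while it still holds a handle on the freshly extracted folder:

```python
from conans import ConanFile, tools

class ExampleRecipe(ConanFile):
    # Hypothetical recipe fragment, not a specific conan-center package.
    _source_subfolder = "source_subfolder"

    def source(self):
        # strip_root=True drops the archive's single top-level directory,
        # so the sources land directly in `destination` and no rename runs.
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)
```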
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/bzip2/all/conanfile.py`
Content:
```
1 import os
2 import textwrap
3 from conans import ConanFile, CMake, tools
4
5 required_conan_version = ">=1.33.0"
6
7
8 class Bzip2Conan(ConanFile):
9 name = "bzip2"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "http://www.bzip.org"
12 license = "bzip2-1.0.8"
13 description = "bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm."
14 topics = ("conan", "bzip2", "data-compressor", "file-compression")
15
16 settings = "os", "compiler", "arch", "build_type"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 "build_executable": [True, False]
21 }
22 default_options = {
23 "shared": False,
24 "fPIC": True,
25 "build_executable": True
26 }
27
28 exports_sources = ["CMakeLists.txt", "patches/**"]
29 generators = "cmake"
30 _cmake = None
31
32 @property
33 def _source_subfolder(self):
34 return "source_subfolder"
35
36 def config_options(self):
37 if self.settings.os == "Windows":
38 del self.options.fPIC
39 self.license = "bzip2-{}".format(self.version)
40
41 def configure(self):
42 if self.options.shared:
43 del self.options.fPIC
44 del self.settings.compiler.libcxx
45 del self.settings.compiler.cppstd
46
47 def source(self):
48 tools.get(**self.conan_data["sources"][self.version])
49 folder_name = "%s-%s" % (self.name, self.version)
50 os.rename(folder_name, self._source_subfolder)
51
52 def _configure_cmake(self):
53 if self._cmake:
54 return self._cmake
55 self._cmake = CMake(self)
56 self._cmake.definitions["BZ2_VERSION_STRING"] = self.version
57 self._cmake.definitions["BZ2_VERSION_MAJOR"] = tools.Version(self.version).major
58 self._cmake.definitions["BZ2_BUILD_EXE"] = self.options.build_executable
59 self._cmake.configure()
60 return self._cmake
61
62 def build(self):
63 for patch in self.conan_data.get("patches", {}).get(self.version, []):
64 tools.patch(**patch)
65 cmake = self._configure_cmake()
66 cmake.build()
67
68 def package(self):
69 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
70 cmake = self._configure_cmake()
71 cmake.install()
72 self._create_cmake_module_variables(
73 os.path.join(self.package_folder, self._module_subfolder, self._module_file)
74 )
75
76 @staticmethod
77 def _create_cmake_module_variables(module_file):
78 content = textwrap.dedent("""\
79 if(DEFINED BZip2_FOUND)
80 set(BZIP2_FOUND ${BZip2_FOUND})
81 set(BZIP2_NEED_PREFIX TRUE)
82 endif()
83 if(DEFINED BZip2_INCLUDE_DIR)
84 set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})
85 set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})
86 endif()
87 if(DEFINED BZip2_LIBRARIES)
88 set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})
89 endif()
90 if(DEFINED BZip2_VERSION)
91 set(BZIP2_VERSION_STRING ${BZip2_VERSION})
92 endif()
93 """)
94 tools.save(module_file, content)
95
96 @property
97 def _module_subfolder(self):
98 return os.path.join("lib", "cmake")
99
100 @property
101 def _module_file(self):
102 return "conan-official-{}-variables.cmake".format(self.name)
103
104 def package_info(self):
105 self.cpp_info.names["cmake_find_package"] = "BZip2"
106 self.cpp_info.names["cmake_find_package_multi"] = "BZip2"
107 self.cpp_info.builddirs.append(self._module_subfolder)
108 self.cpp_info.build_modules["cmake_find_package"] = [os.path.join(self._module_subfolder, self._module_file)]
109 self.cpp_info.libs = ["bz2"]
110
111 if self.options.build_executable:
112 bin_path = os.path.join(self.package_folder, "bin")
113 self.output.info("Appending PATH environment variable: {}".format(bin_path))
114 self.env_info.PATH.append(bin_path)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/bzip2/all/conanfile.py b/recipes/bzip2/all/conanfile.py
--- a/recipes/bzip2/all/conanfile.py
+++ b/recipes/bzip2/all/conanfile.py
@@ -45,9 +45,7 @@
del self.settings.compiler.cppstd
def source(self):
- tools.get(**self.conan_data["sources"][self.version])
- folder_name = "%s-%s" % (self.name, self.version)
- os.rename(folder_name, self._source_subfolder)
+ tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
| {"golden_diff": "diff --git a/recipes/bzip2/all/conanfile.py b/recipes/bzip2/all/conanfile.py\n--- a/recipes/bzip2/all/conanfile.py\n+++ b/recipes/bzip2/all/conanfile.py\n@@ -45,9 +45,7 @@\n del self.settings.compiler.cppstd\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- folder_name = \"%s-%s\" % (self.name, self.version)\n- os.rename(folder_name, self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n \n def _configure_cmake(self):\n if self._cmake:\n", "issue": "[package] all: \"Access is denied\" in os.rename() on Windows\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **almost all packages affected**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC 16**\r\n * Conan version: **conan 1.35.2**\r\n * Python version: **Python 3.8.7**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos_build=Windows\r\nos=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\ncompiler.runtime=MD\r\nbuild_type=Release\r\n```\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThis is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774\r\nHowever most recipes still use `os.rename()` and not `tools.rename()`. \r\n\r\n### Log\r\n```\r\nb2/4.2.0: Configuring sources in C:\\Users\\xxx\\.conan\\data\\b2\\4.2.0\\_\\_\\source\r\nERROR: b2/4.2.0: Error in source() method, line 58\r\nos.rename(extracted_dir, \"source\")\r\nPermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'\r\n```\r\n\n", "before_files": [{"content": "import os\nimport textwrap\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Bzip2Conan(ConanFile):\n name = \"bzip2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.bzip.org\"\n license = \"bzip2-1.0.8\"\n description = \"bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm.\"\n topics = (\"conan\", \"bzip2\", \"data-compressor\", \"file-compression\")\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_executable\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_executable\": True\n }\n\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n self.license = \"bzip2-{}\".format(self.version)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n folder_name = \"%s-%s\" % (self.name, self.version)\n os.rename(folder_name, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BZ2_VERSION_STRING\"] = self.version\n self._cmake.definitions[\"BZ2_VERSION_MAJOR\"] = tools.Version(self.version).major\n self._cmake.definitions[\"BZ2_BUILD_EXE\"] = self.options.build_executable\n 
self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_subfolder, self._module_file)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED BZip2_FOUND)\n set(BZIP2_FOUND ${BZip2_FOUND})\n set(BZIP2_NEED_PREFIX TRUE)\n endif()\n if(DEFINED BZip2_INCLUDE_DIR)\n set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})\n set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})\n endif()\n if(DEFINED BZip2_LIBRARIES)\n set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})\n endif()\n if(DEFINED BZip2_VERSION)\n set(BZIP2_VERSION_STRING ${BZip2_VERSION})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-variables.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"BZip2\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"BZip2\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [os.path.join(self._module_subfolder, self._module_file)]\n self.cpp_info.libs = [\"bz2\"]\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/bzip2/all/conanfile.py"}], "after_files": [{"content": "import os\nimport textwrap\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Bzip2Conan(ConanFile):\n name = \"bzip2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.bzip.org\"\n license = \"bzip2-1.0.8\"\n description = \"bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm.\"\n topics = (\"conan\", \"bzip2\", \"data-compressor\", \"file-compression\")\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_executable\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_executable\": True\n }\n\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n self.license = \"bzip2-{}\".format(self.version)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BZ2_VERSION_STRING\"] = self.version\n self._cmake.definitions[\"BZ2_VERSION_MAJOR\"] = tools.Version(self.version).major\n self._cmake.definitions[\"BZ2_BUILD_EXE\"] = self.options.build_executable\n 
self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_subfolder, self._module_file)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED BZip2_FOUND)\n set(BZIP2_FOUND ${BZip2_FOUND})\n set(BZIP2_NEED_PREFIX TRUE)\n endif()\n if(DEFINED BZip2_INCLUDE_DIR)\n set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})\n set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})\n endif()\n if(DEFINED BZip2_LIBRARIES)\n set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})\n endif()\n if(DEFINED BZip2_VERSION)\n set(BZIP2_VERSION_STRING ${BZip2_VERSION})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-variables.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"BZip2\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"BZip2\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [os.path.join(self._module_subfolder, self._module_file)]\n self.cpp_info.libs = [\"bz2\"]\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/bzip2/all/conanfile.py"}]} | 1,811 | 165 |
gh_patches_debug_55397 | rasdani/github-patches | git_diff | googleapis__python-bigquery-1413 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Pythons <4
I'd like to be able to allow python <4 in ibis, but as of this PR (https://github.com/ibis-project/ibis/pull/4797) I cannot due to this library's `<3.11` pin.
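For illustration, a small hypothetical check mirroring how pip evaluates `Requires-Python` (using the real `packaging` library):

```python
from packaging.specifiers import SpecifierSet

# The published cap ">=3.7, <3.11" excludes every 3.11+ interpreter,
# which is what prevents downstreams from allowing "python <4".
spec = SpecifierSet(">=3.7, <3.11")
print("3.10.8" in spec)   # True  -> installable on Python 3.10
print("3.11.1" in spec)   # False -> pip refuses the package on 3.11
```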
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "grpcio >= 1.47.0, < 2.0dev", # https://github.com/googleapis/python-bigquery/issues/1262
33 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
34 # Until this issue is closed
35 # https://github.com/googleapis/google-cloud-python/issues/10566
36 "google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
37 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
38 "proto-plus >= 1.22.0, <2.0.0dev",
39 # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x
40 # Until this issue is closed
41 # https://github.com/googleapis/google-cloud-python/issues/10566
42 "google-cloud-core >= 1.4.1, <3.0.0dev",
43 "google-resumable-media >= 0.6.0, < 3.0dev",
44 "packaging >= 14.3, <22.0.0dev",
45 "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", # For the legacy proto-based types.
46 "python-dateutil >= 2.7.2, <3.0dev",
47 "pyarrow >= 3.0.0, < 11.0dev",
48 "requests >= 2.21.0, < 3.0.0dev",
49 ]
50 extras = {
51 # Keep the no-op bqstorage extra for backward compatibility.
52 # See: https://github.com/googleapis/python-bigquery/issues/757
53 "bqstorage": [],
54 "pandas": ["pandas>=1.0.0", "db-dtypes>=0.3.0,<2.0.0dev"],
55 "ipywidgets": ["ipywidgets==7.7.1"],
56 "geopandas": ["geopandas>=0.9.0, <1.0dev", "Shapely>=1.6.0, <2.0dev"],
57 "ipython": ["ipython>=7.0.1,!=8.1.0"],
58 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
59 "opentelemetry": [
60 "opentelemetry-api >= 1.1.0",
61 "opentelemetry-sdk >= 1.1.0",
62 "opentelemetry-instrumentation >= 0.20b0",
63 ],
64 }
65
66 all_extras = []
67
68 for extra in extras:
69 all_extras.extend(extras[extra])
70
71 extras["all"] = all_extras
72
73 # Setup boilerplate below this line.
74
75 package_root = os.path.abspath(os.path.dirname(__file__))
76
77 readme_filename = os.path.join(package_root, "README.rst")
78 with io.open(readme_filename, encoding="utf-8") as readme_file:
79 readme = readme_file.read()
80
81 version = {}
82 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
83 exec(fp.read(), version)
84 version = version["__version__"]
85
86 # Only include packages under the 'google' namespace. Do not include tests,
87 # benchmarks, etc.
88 packages = [
89 package
90 for package in setuptools.PEP420PackageFinder.find()
91 if package.startswith("google")
92 ]
93
94 # Determine which namespaces are needed.
95 namespaces = ["google"]
96 if "google.cloud" in packages:
97 namespaces.append("google.cloud")
98
99
100 setuptools.setup(
101 name=name,
102 version=version,
103 description=description,
104 long_description=readme,
105 author="Google LLC",
106 author_email="[email protected]",
107 license="Apache 2.0",
108 url="https://github.com/googleapis/python-bigquery",
109 classifiers=[
110 release_status,
111 "Intended Audience :: Developers",
112 "License :: OSI Approved :: Apache Software License",
113 "Programming Language :: Python",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: 3.7",
116 "Programming Language :: Python :: 3.8",
117 "Programming Language :: Python :: 3.9",
118 "Programming Language :: Python :: 3.10",
119 "Operating System :: OS Independent",
120 "Topic :: Internet",
121 ],
122 platforms="Posix; MacOS X; Windows",
123 packages=packages,
124 namespace_packages=namespaces,
125 install_requires=dependencies,
126 extras_require=extras,
127 python_requires=">=3.7, <3.11",
128 include_package_data=True,
129 zip_safe=False,
130 )
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -124,7 +124,7 @@
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
- python_requires=">=3.7, <3.11",
+ python_requires=">=3.7",
include_package_data=True,
zip_safe=False,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -124,7 +124,7 @@\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n- python_requires=\">=3.7, <3.11\",\n+ python_requires=\">=3.7\",\n include_package_data=True,\n zip_safe=False,\n )\n", "issue": "Support Pythons <4\nI'd like to be able to allow python <4 in ibis, but as of this PR (https://github.com/ibis-project/ibis/pull/4797) I cannot due to this library's `<3.11` pin.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"grpcio >= 1.47.0, < 2.0dev\", # https://github.com/googleapis/python-bigquery/issues/1262\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n \"proto-plus >= 1.22.0, <2.0.0dev\",\n # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-cloud-core >= 1.4.1, <3.0.0dev\",\n \"google-resumable-media >= 0.6.0, < 3.0dev\",\n \"packaging >= 14.3, <22.0.0dev\",\n \"protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5\", # For the legacy proto-based types.\n \"python-dateutil >= 2.7.2, <3.0dev\",\n \"pyarrow >= 3.0.0, < 11.0dev\",\n \"requests >= 2.21.0, < 3.0.0dev\",\n]\nextras = {\n # Keep the no-op bqstorage extra for backward compatibility.\n # See: https://github.com/googleapis/python-bigquery/issues/757\n \"bqstorage\": [],\n \"pandas\": [\"pandas>=1.0.0\", \"db-dtypes>=0.3.0,<2.0.0dev\"],\n \"ipywidgets\": [\"ipywidgets==7.7.1\"],\n \"geopandas\": [\"geopandas>=0.9.0, <1.0dev\", \"Shapely>=1.6.0, <2.0dev\"],\n \"ipython\": [\"ipython>=7.0.1,!=8.1.0\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 1.1.0\",\n \"opentelemetry-sdk >= 1.1.0\",\n \"opentelemetry-instrumentation >= 0.20b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, 
\"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.7, <3.11\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"grpcio >= 1.47.0, < 2.0dev\", # https://github.com/googleapis/python-bigquery/issues/1262\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n \"proto-plus >= 1.22.0, <2.0.0dev\",\n # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-cloud-core >= 1.4.1, <3.0.0dev\",\n \"google-resumable-media >= 0.6.0, < 3.0dev\",\n \"packaging >= 14.3, <22.0.0dev\",\n \"protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5\", # For the legacy proto-based types.\n \"python-dateutil >= 2.7.2, <3.0dev\",\n \"pyarrow >= 3.0.0, < 11.0dev\",\n \"requests >= 2.21.0, < 3.0.0dev\",\n]\nextras = {\n # Keep the no-op bqstorage extra for backward compatibility.\n # See: 
https://github.com/googleapis/python-bigquery/issues/757\n \"bqstorage\": [],\n \"pandas\": [\"pandas>=1.0.0\", \"db-dtypes>=0.3.0,<2.0.0dev\"],\n \"ipywidgets\": [\"ipywidgets==7.7.1\"],\n \"geopandas\": [\"geopandas>=0.9.0, <1.0dev\", \"Shapely>=1.6.0, <2.0dev\"],\n \"ipython\": [\"ipython>=7.0.1,!=8.1.0\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 1.1.0\",\n \"opentelemetry-sdk >= 1.1.0\",\n \"opentelemetry-instrumentation >= 0.20b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.7\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,937 | 91 |
gh_patches_debug_1624 | rasdani/github-patches | git_diff | pypa__cibuildwheel-977 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
on windows, setup_py_python_requires attempts to open utf-8 setup.py as Windows-1252 and fails
### Description
This [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid UTF-8 and has a few non-ASCII characters. In a Windows build, `setup_py_python_requires` appears to open this file as if it were encoded as Windows-1252, and it therefore fails on some of those non-ASCII characters.
### Build log
https://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40
### CI config
https://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47
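
A hypothetical repro of the failure mode (the snippet is illustrative, not taken from cibuildwheel's test suite): on Windows, `open()` without an explicit encoding falls back to the locale code page, commonly cp1252.

```python
# Reading a UTF-8 file with the implicit Windows locale encoding can
# raise UnicodeDecodeError on bytes cp1252 leaves undefined, or silently
# produce mojibake for the rest.
with open("setup.py", encoding="cp1252") as f:   # what Windows does implicitly
    content = f.read()                           # may fail on UTF-8 bytes

with open("setup.py", encoding="utf8") as f:     # the explicit, portable fix
    content = f.read()
```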
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/projectfiles.py`
Content:
```
1 import ast
2 import sys
3 from configparser import ConfigParser
4 from pathlib import Path
5 from typing import Any, Optional
6
7 import tomli
8
9 if sys.version_info < (3, 8):
10 Constant = ast.Str
11
12 def get_constant(x: ast.Str) -> str:
13 return x.s
14
15 else:
16 Constant = ast.Constant
17
18 def get_constant(x: ast.Constant) -> Any:
19 return x.value
20
21
22 class Analyzer(ast.NodeVisitor):
23 def __init__(self) -> None:
24 self.requires_python: Optional[str] = None
25
26 def visit(self, content: ast.AST) -> None:
27 for node in ast.walk(content):
28 for child in ast.iter_child_nodes(node):
29 child.parent = node # type: ignore[attr-defined]
30 super().visit(content)
31
32 def visit_keyword(self, node: ast.keyword) -> None:
33 self.generic_visit(node)
34 if node.arg == "python_requires":
35 # Must not be nested in an if or other structure
36 # This will be Module -> Expr -> Call -> keyword
37 if not hasattr(node.parent.parent.parent, "parent") and isinstance( # type: ignore[attr-defined]
38 node.value, Constant
39 ):
40 self.requires_python = get_constant(node.value)
41
42
43 def setup_py_python_requires(content: str) -> Optional[str]:
44 try:
45 tree = ast.parse(content)
46 analyzer = Analyzer()
47 analyzer.visit(tree)
48 return analyzer.requires_python or None
49 except Exception:
50 return None
51
52
53 def get_requires_python_str(package_dir: Path) -> Optional[str]:
54 """Return the python requires string from the most canonical source available, or None"""
55
56 # Read in from pyproject.toml:project.requires-python
57 try:
58 with (package_dir / "pyproject.toml").open("rb") as f1:
59 info = tomli.load(f1)
60 return str(info["project"]["requires-python"])
61 except (FileNotFoundError, KeyError, IndexError, TypeError):
62 pass
63
64 # Read in from setup.cfg:options.python_requires
65 try:
66 config = ConfigParser()
67 config.read(package_dir / "setup.cfg")
68 return str(config["options"]["python_requires"])
69 except (FileNotFoundError, KeyError, IndexError, TypeError):
70 pass
71
72 try:
73 with (package_dir / "setup.py").open() as f2:
74 return setup_py_python_requires(f2.read())
75 except FileNotFoundError:
76 pass
77
78 return None
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -70,7 +70,7 @@
pass
try:
- with (package_dir / "setup.py").open() as f2:
+ with (package_dir / "setup.py").open(encoding="utf8") as f2:
return setup_py_python_requires(f2.read())
except FileNotFoundError:
pass
| {"golden_diff": "diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py\n--- a/cibuildwheel/projectfiles.py\n+++ b/cibuildwheel/projectfiles.py\n@@ -70,7 +70,7 @@\n pass\n \n try:\n- with (package_dir / \"setup.py\").open() as f2:\n+ with (package_dir / \"setup.py\").open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n", "issue": "on windows, setup_py_python_requires attempts to open utf-8 setup.py as Windows-1252 and fails\n### Description\r\n\r\nThis [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid utf-8, and has a few non-ascii characters. In a windows build, `setup_py_python_requires` appears to be opening this file as if it was encoded like Windows-1252 and thus fails on some non-ascii characters.\r\n\r\n### Build log\r\n\r\nhttps://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40\r\n\r\n### CI config\r\n\r\nhttps://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47\n", "before_files": [{"content": "import ast\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Optional\n\nimport tomli\n\nif sys.version_info < (3, 8):\n Constant = ast.Str\n\n def get_constant(x: ast.Str) -> str:\n return x.s\n\nelse:\n Constant = ast.Constant\n\n def get_constant(x: ast.Constant) -> Any:\n return x.value\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: Optional[str] = None\n\n def visit(self, content: ast.AST) -> None:\n for node in ast.walk(content):\n for child in ast.iter_child_nodes(node):\n child.parent = node # type: ignore[attr-defined]\n super().visit(content)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n if node.arg == \"python_requires\":\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if not hasattr(node.parent.parent.parent, \"parent\") and isinstance( # type: ignore[attr-defined]\n node.value, Constant\n ):\n self.requires_python = get_constant(node.value)\n\n\ndef setup_py_python_requires(content: str) -> Optional[str]:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception:\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> Optional[str]:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n try:\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomli.load(f1)\n return str(info[\"project\"][\"requires-python\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n # Read in from setup.cfg:options.python_requires\n try:\n config = ConfigParser()\n config.read(package_dir / \"setup.cfg\")\n return str(config[\"options\"][\"python_requires\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n try:\n with (package_dir / \"setup.py\").open() as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n\n return None\n", "path": "cibuildwheel/projectfiles.py"}], "after_files": [{"content": "import ast\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Optional\n\nimport tomli\n\nif sys.version_info < (3, 8):\n Constant = ast.Str\n\n def get_constant(x: ast.Str) -> str:\n return 
x.s\n\nelse:\n Constant = ast.Constant\n\n def get_constant(x: ast.Constant) -> Any:\n return x.value\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: Optional[str] = None\n\n def visit(self, content: ast.AST) -> None:\n for node in ast.walk(content):\n for child in ast.iter_child_nodes(node):\n child.parent = node # type: ignore[attr-defined]\n super().visit(content)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n if node.arg == \"python_requires\":\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if not hasattr(node.parent.parent.parent, \"parent\") and isinstance( # type: ignore[attr-defined]\n node.value, Constant\n ):\n self.requires_python = get_constant(node.value)\n\n\ndef setup_py_python_requires(content: str) -> Optional[str]:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception:\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> Optional[str]:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n try:\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomli.load(f1)\n return str(info[\"project\"][\"requires-python\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n # Read in from setup.cfg:options.python_requires\n try:\n config = ConfigParser()\n config.read(package_dir / \"setup.cfg\")\n return str(config[\"options\"][\"python_requires\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n try:\n with (package_dir / \"setup.py\").open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n\n return None\n", "path": "cibuildwheel/projectfiles.py"}]} | 1,109 | 113 |
gh_patches_debug_1169 | rasdani/github-patches | git_diff | sosreport__sos-3483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Obtain CNI files for containerd
Containerd uses the CNI configuration files found in the directory defined by its configuration:
```
[plugins."io.containerd.grpc.v1.cri".cni]
    conf_dir = "/etc/cni/net.d"
```
It would be very useful to collect the CNI configurations present in that directory when debugging networking-related problems.
https://github.com/sosreport/sos/blob/b94ced8370824bd62f3c7573ae33fcb96c5da531/sos/report/plugins/containerd.py#L12-L28
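
For orientation, a sketch of the plugin with the CNI directory added; this assumes the default `conf_dir` of `/etc/cni/net.d` and mirrors the change proposed below:

```python
from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin

class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):

    short_desc = 'Containerd containers'
    plugin_name = 'containerd'
    profiles = ('container',)
    packages = ('containerd', 'containerd.io',)

    def setup(self):
        self.add_copy_spec([
            "/etc/containerd/",
            "/etc/cni/net.d/",  # CNI network configs that conf_dir points at
        ])
        self.add_cmd_output('containerd config dump')
        self.add_journal(units='containerd')
```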
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sos/report/plugins/containerd.py`
Content:
```
1 # This file is part of the sos project: https://github.com/sosreport/sos
2 #
3 # This copyrighted material is made available to anyone wishing to use,
4 # modify, copy, or redistribute it subject to the terms and conditions of
5 # version 2 of the GNU General Public License.
6 #
7 # See the LICENSE file in the source distribution for further information.
8
9 from sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)
10
11
12 class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):
13
14 short_desc = 'Containerd containers'
15 plugin_name = 'containerd'
16 profiles = ('container',)
17 packages = ('containerd', 'containerd.io',)
18
19 def setup(self):
20 self.add_copy_spec([
21 "/etc/containerd/",
22 ])
23
24 self.add_cmd_output('containerd config dump')
25
26 # collect the containerd logs.
27 self.add_journal(units='containerd')
28
29 # vim: set et ts=4 sw=4 :
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py
--- a/sos/report/plugins/containerd.py
+++ b/sos/report/plugins/containerd.py
@@ -19,6 +19,7 @@
def setup(self):
self.add_copy_spec([
"/etc/containerd/",
+ "/etc/cni/net.d/",
])
self.add_cmd_output('containerd config dump')
| {"golden_diff": "diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py\n--- a/sos/report/plugins/containerd.py\n+++ b/sos/report/plugins/containerd.py\n@@ -19,6 +19,7 @@\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n+ \"/etc/cni/net.d/\",\n ])\n \n self.add_cmd_output('containerd config dump')\n", "issue": "Obtain CNI files for containerd\nContainerd uses the CNI configuration present in the defined folders by the configuration\r\n\r\n```\r\n [plugins.\"io.containerd.grpc.v1.cri\".cni]\r\n conf_dir = \"/etc/cni/net.d\r\n```\r\n\r\nIt will be very useful to obtain the cni configurations present on the folder for debugging networking related problems \r\n\r\n\r\nhttps://github.com/sosreport/sos/blob/b94ced8370824bd62f3c7573ae33fcb96c5da531/sos/report/plugins/containerd.py#L12-L28\n", "before_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/containerd.py"}], "after_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n \"/etc/cni/net.d/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/containerd.py"}]} | 660 | 92 |
gh_patches_debug_35606 | rasdani/github-patches | git_diff | Kinto__kinto-972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a configuration of collections that the history plugin needs to keep track of
Today the history plugin applies to all collections, but most of them don't need it.
For instance, with the kinto-signer plugin we don't want to track the history of changes in the preview and signed collections.
The same goes for the kinto-changes plugin, where we don't want to track modifications to the monitored changes.
The same way we can configure the resources kinto-signer tracks, we should be able to configure the list of collections the history plugin tracks.
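
A minimal sketch of the opt-out this asks for, matching the `history.exclude_resources` setting name used in the patch below (the URI values shown are illustrative):

```python
from pyramid.settings import aslist

def is_excluded(settings, uri):
    # "history.exclude_resources" holds a whitespace-separated list of
    # bucket/collection/record URIs whose changes should not be recorded.
    excluded = aslist(settings.get("history.exclude_resources", ""))
    return uri in excluded
```

An operator would then set something like `kinto.history.exclude_resources = /buckets/preview` in the INI file.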
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/history/listener.py`
Content:
```
1 from kinto.core.utils import instance_uri
2 from datetime import datetime
3
4
5 def on_resource_changed(event):
6 """
7 Everytime an object is created/changed/deleted, we create an entry in the
8 ``history`` resource. The entries are served as read-only in the
9 :mod:`kinto.plugins.history.views` module.
10 """
11 payload = event.payload
12 resource_name = payload['resource_name']
13 event_uri = payload['uri']
14
15 bucket_id = None
16 bucket_uri = None
17 collection_uri = None
18
19 storage = event.request.registry.storage
20 permission = event.request.registry.permission
21
22 targets = []
23 for impacted in event.impacted_records:
24 target = impacted['new']
25 obj_id = target['id']
26
27 try:
28 bucket_id = payload['bucket_id']
29 except KeyError:
30 # e.g. DELETE /buckets
31 bucket_id = obj_id
32 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
33
34 if 'collection_id' in payload:
35 collection_id = payload['collection_id']
36 collection_uri = instance_uri(event.request,
37 'collection',
38 bucket_id=bucket_id,
39 id=collection_id)
40
41 # On POST .../records, the URI does not contain the newly created
42 # record id.
43 parts = event_uri.split('/')
44 if resource_name in parts[-1]:
45 parts.append(obj_id)
46 else:
47 # Make sure the id is correct on grouped events.
48 parts[-1] = obj_id
49 uri = '/'.join(parts)
50 targets.append((uri, target))
51
52 # Prepare a list of object ids to be fetched from permission backend,
53 # and fetch them all at once. Use a mapping for later convenience.
54 all_perms_objects_ids = [oid for (oid, _) in targets]
55 all_perms_objects_ids.append(bucket_uri)
56 if collection_uri is not None:
57 all_perms_objects_ids.append(collection_uri)
58 all_perms_objects_ids = list(set(all_perms_objects_ids))
59 all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
60 perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))
61
62 bucket_perms = perms_by_object_id[bucket_uri]
63 collection_perms = {}
64 if collection_uri is not None:
65 collection_perms = perms_by_object_id[collection_uri]
66
67 # The principals allowed to read the bucket and collection.
68 # (Note: ``write`` means ``read``)
69 read_principals = set(bucket_perms.get('read', []))
70 read_principals.update(bucket_perms.get('write', []))
71 read_principals.update(collection_perms.get('read', []))
72 read_principals.update(collection_perms.get('write', []))
73
74 # Create a history entry for each impacted record.
75 for (uri, target) in targets:
76 obj_id = target['id']
77 # Prepare the history entry attributes.
78 perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
79 eventattrs = dict(**payload)
80 eventattrs.pop('timestamp', None) # Already in target `last_modified`.
81 eventattrs.pop('bucket_id', None)
82 eventattrs['%s_id' % resource_name] = obj_id
83 eventattrs['uri'] = uri
84 attrs = dict(date=datetime.now().isoformat(),
85 target={'data': target, 'permissions': perms},
86 **eventattrs)
87
88 # Create a record for the 'history' resource, whose parent_id is
89 # the bucket URI (c.f. views.py).
90 # Note: this will be rolledback if the transaction is rolledback.
91 entry = storage.create(parent_id=bucket_uri,
92 collection_id='history',
93 record=attrs)
94
95 # The read permission on the newly created history entry is the union
96 # of the record permissions with the one from bucket and collection.
97 entry_principals = set(read_principals)
98 entry_principals.update(perms.get('read', []))
99 entry_principals.update(perms.get('write', []))
100 entry_perms = {'read': list(entry_principals)}
101 # /buckets/{id}/history is the URI for the list of history entries.
102 entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])
103 permission.replace_object_permissions(entry_perm_id, entry_perms)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py
--- a/kinto/plugins/history/listener.py
+++ b/kinto/plugins/history/listener.py
@@ -1,3 +1,5 @@
+from pyramid.settings import aslist
+
from kinto.core.utils import instance_uri
from datetime import datetime
@@ -18,6 +20,9 @@
storage = event.request.registry.storage
permission = event.request.registry.permission
+ settings = event.request.registry.settings
+
+ excluded_resources = aslist(settings.get('history.exclude_resources', ''))
targets = []
for impacted in event.impacted_records:
@@ -31,12 +36,17 @@
bucket_id = obj_id
bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
+ if bucket_uri in excluded_resources:
+ continue
+
if 'collection_id' in payload:
collection_id = payload['collection_id']
collection_uri = instance_uri(event.request,
'collection',
bucket_id=bucket_id,
id=collection_id)
+ if collection_uri in excluded_resources:
+ continue
# On POST .../records, the URI does not contain the newly created
# record id.
@@ -47,8 +57,15 @@
# Make sure the id is correct on grouped events.
parts[-1] = obj_id
uri = '/'.join(parts)
+
+ if uri in excluded_resources:
+ continue
+
targets.append((uri, target))
+ if not targets:
+ return # Nothing to do.
+
# Prepare a list of object ids to be fetched from permission backend,
# and fetch them all at once. Use a mapping for later convenience.
all_perms_objects_ids = [oid for (oid, _) in targets]
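A minimal sketch of how the new `history.exclude_resources` setting from the diff above could be exercised on its own; the setting key and `aslist` parsing come from the diff itself, while the bucket URIs are hypothetical examples.
```python
# Sketch only: mirrors how the patched listener reads the new setting.
from pyramid.settings import aslist

# Hypothetical settings mapping; the URIs are made-up examples.
settings = {"history.exclude_resources": "/buckets/preview /buckets/signed"}

excluded_resources = aslist(settings.get("history.exclude_resources", ""))
print(excluded_resources)  # ['/buckets/preview', '/buckets/signed']

bucket_uri = "/buckets/preview"
if bucket_uri in excluded_resources:
    print("history entry skipped for", bucket_uri)
```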
| {"golden_diff": "diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py\n--- a/kinto/plugins/history/listener.py\n+++ b/kinto/plugins/history/listener.py\n@@ -1,3 +1,5 @@\n+from pyramid.settings import aslist\n+\n from kinto.core.utils import instance_uri\n from datetime import datetime\n \n@@ -18,6 +20,9 @@\n \n storage = event.request.registry.storage\n permission = event.request.registry.permission\n+ settings = event.request.registry.settings\n+\n+ excluded_resources = aslist(settings.get('history.exclude_resources', ''))\n \n targets = []\n for impacted in event.impacted_records:\n@@ -31,12 +36,17 @@\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n \n+ if bucket_uri in excluded_resources:\n+ continue\n+\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n+ if collection_uri in excluded_resources:\n+ continue\n \n # On POST .../records, the URI does not contain the newly created\n # record id.\n@@ -47,8 +57,15 @@\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n+\n+ if uri in excluded_resources:\n+ continue\n+\n targets.append((uri, target))\n \n+ if not targets:\n+ return # Nothing to do.\n+\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n", "issue": "Add a configuration of collections that the history plugin needs to keep track on\nToday the history plugin applies to all the collection but most of them don't need it.\r\nFor instance with the kinto-signer plugin we don't want to track history of changes in the preview and signed collection.\r\nThe same goes with the kinto-changes plugin when we don't want to track monitor changes modifications.\r\n\r\nThe same way we can configure the kinto-signer resources we want to track, we should be able to configure the list of collections we want the history plugin to track.\nAdd a configuration of collections that the history plugin needs to keep track on\nToday the history plugin applies to all the collection but most of them don't need it.\r\nFor instance with the kinto-signer plugin we don't want to track history of changes in the preview and signed collection.\r\nThe same goes with the kinto-changes plugin when we don't want to track monitor changes modifications.\r\n\r\nThe same way we can configure the kinto-signer resources we want to track, we should be able to configure the list of collections we want the history plugin to track.\n", "before_files": [{"content": "from kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. 
DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n targets.append((uri, target))\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n eventattrs['%s_id' % resource_name] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}], "after_files": [{"content": "from pyramid.settings import aslist\n\nfrom kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. 
The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n settings = event.request.registry.settings\n\n excluded_resources = aslist(settings.get('history.exclude_resources', ''))\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if bucket_uri in excluded_resources:\n continue\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n if collection_uri in excluded_resources:\n continue\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n if uri in excluded_resources:\n continue\n\n targets.append((uri, target))\n\n if not targets:\n return # Nothing to do.\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n eventattrs['%s_id' % resource_name] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. 
views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}]} | 1,624 | 402 |
gh_patches_debug_25576 | rasdani/github-patches | git_diff | sublimelsp__LSP-1772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
On Windows, the drive letter in file URIs returned by the server is lowercase.
**Describe the bug**
I tried both intelephense and pyright; they both returned a lowercased drive letter, so I suspect it's a standard (or maybe VSCode's LSP lib does it).
https://user-images.githubusercontent.com/6594915/123961095-96286c80-d9e2-11eb-8ada-0da9af754a55.mp4
In "Goto Definition...", this causes ST to open a file whose drive letter is in lowercase. And that may cause various mysterious problem sometimes... Or maybe, this should be fixed in ST core.
**To Reproduce**
Steps to reproduce the behavior:
1. Install LSP-intelephense with a Windows build ST
2. Open a PHP project
3. Make sure the definition file is not opened in a tab already
4. Do "Goto Definition"
5. The newly opened tab has a lowercase drive letter
**Expected behavior**
The drive letter should be uppercase.
**Environment (please complete the following information):**
- OS: Win10 21H1 x64
- Sublime Text version: 4109
- LSP version: 4070-1.6.1
- Language servers used: intelephense, pyright
**Additional context**
This is a Windows-only issue as it's case-insensitive.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/url.py`
Content:
```
1 from .typing import Any, Tuple
2 from urllib.parse import quote
3 from urllib.parse import urljoin
4 from urllib.parse import urlparse
5 from urllib.request import pathname2url
6 from urllib.request import url2pathname
7 import os
8 import re
9
10 import sublime
11
12
13 def filename_to_uri(file_name: str) -> str:
14 """
15 Convert a file name obtained from view.file_name() into an URI
16 """
17 prefix = sublime.installed_packages_path()
18 if file_name.startswith(prefix):
19 return _to_resource_uri(file_name, prefix)
20 prefix = sublime.packages_path()
21 if file_name.startswith(prefix) and not os.path.exists(file_name):
22 return _to_resource_uri(file_name, prefix)
23 path = pathname2url(file_name)
24 re.sub(r"^([A-Z]):/", _lowercase_driveletter, path)
25 return urljoin("file:", path)
26
27
28 def view_to_uri(view: sublime.View) -> str:
29 file_name = view.file_name()
30 if not file_name:
31 return "buffer://sublime/{}".format(view.buffer_id())
32 return filename_to_uri(file_name)
33
34
35 def uri_to_filename(uri: str) -> str:
36 """
37 DEPRECATED: An URI associated to a view does not necessarily have a "file:" scheme.
38 Use urllib.parse.urlparse to determine the scheme and go from there.
39 Use urllib.parse.unquote to unquote the path.
40 """
41 parsed = urlparse(uri)
42 assert parsed.scheme == "file"
43 if os.name == 'nt':
44 # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)
45 return url2pathname(parsed.path).strip('\\')
46 else:
47 return url2pathname(parsed.path)
48
49
50 def parse_uri(uri: str) -> Tuple[str, str]:
51 """
52 Parses an URI into a tuple where the first element is the URI scheme. The
53 second element is the local filesystem path if the URI is a file URI,
54 otherwise the second element is the original URI.
55 """
56 parsed = urlparse(uri)
57 if parsed.scheme == "file":
58 if os.name == 'nt':
59 # TODO: this is wrong for UNC paths
60 return parsed.scheme, url2pathname(parsed.path).strip('\\')
61 return parsed.scheme, url2pathname(parsed.path)
62 return parsed.scheme, uri
63
64
65 def _to_resource_uri(path: str, prefix: str) -> str:
66 """
67 Terrible hacks from ST core leak into packages as well.
68
69 See: https://github.com/sublimehq/sublime_text/issues/3742
70 """
71 return "res://Packages{}".format(quote(path[len(prefix):]))
72
73
74 def _lowercase_driveletter(match: Any) -> str:
75 """
76 For compatibility with certain other language clients.
77 """
78 return "{}:/".format(match.group(1).lower())
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/url.py b/plugin/core/url.py
--- a/plugin/core/url.py
+++ b/plugin/core/url.py
@@ -21,7 +21,6 @@
if file_name.startswith(prefix) and not os.path.exists(file_name):
return _to_resource_uri(file_name, prefix)
path = pathname2url(file_name)
- re.sub(r"^([A-Z]):/", _lowercase_driveletter, path)
return urljoin("file:", path)
@@ -42,7 +41,8 @@
assert parsed.scheme == "file"
if os.name == 'nt':
# url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)
- return url2pathname(parsed.path).strip('\\')
+ path = url2pathname(parsed.path).strip('\\')
+ return re.sub(r"^([a-z]):", _uppercase_driveletter, path)
else:
return url2pathname(parsed.path)
@@ -71,8 +71,8 @@
return "res://Packages{}".format(quote(path[len(prefix):]))
-def _lowercase_driveletter(match: Any) -> str:
+def _uppercase_driveletter(match: Any) -> str:
"""
- For compatibility with certain other language clients.
+ For compatibility with Sublime's VCS status in the status bar.
"""
- return "{}:/".format(match.group(1).lower())
+ return "{}:".format(match.group(1).upper())
| {"golden_diff": "diff --git a/plugin/core/url.py b/plugin/core/url.py\n--- a/plugin/core/url.py\n+++ b/plugin/core/url.py\n@@ -21,7 +21,6 @@\n if file_name.startswith(prefix) and not os.path.exists(file_name):\n return _to_resource_uri(file_name, prefix)\n path = pathname2url(file_name)\n- re.sub(r\"^([A-Z]):/\", _lowercase_driveletter, path)\n return urljoin(\"file:\", path)\n \n \n@@ -42,7 +41,8 @@\n assert parsed.scheme == \"file\"\n if os.name == 'nt':\n # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)\n- return url2pathname(parsed.path).strip('\\\\')\n+ path = url2pathname(parsed.path).strip('\\\\')\n+ return re.sub(r\"^([a-z]):\", _uppercase_driveletter, path)\n else:\n return url2pathname(parsed.path)\n \n@@ -71,8 +71,8 @@\n return \"res://Packages{}\".format(quote(path[len(prefix):]))\n \n \n-def _lowercase_driveletter(match: Any) -> str:\n+def _uppercase_driveletter(match: Any) -> str:\n \"\"\"\n- For compatibility with certain other language clients.\n+ For compatibility with Sublime's VCS status in the status bar.\n \"\"\"\n- return \"{}:/\".format(match.group(1).lower())\n+ return \"{}:\".format(match.group(1).upper())\n", "issue": "On Windows, the drive letter in server responsed file URIs are lowercase.\n**Describe the bug**\r\n\r\nI tried both intelephense and pyright, they both returned lowercased drive letter thus I suspect it's a standard. (or maybe VSCode's LSP lib does it)\r\n\r\nhttps://user-images.githubusercontent.com/6594915/123961095-96286c80-d9e2-11eb-8ada-0da9af754a55.mp4\r\n\r\nIn \"Goto Definition...\", this causes ST to open a file whose drive letter is in lowercase. And that may cause various mysterious problem sometimes... Or maybe, this should be fixed in ST core.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install LSP-intelephense with a Windows build ST\r\n2. Open a PHP project\r\n3. Make sure the definition file is not opened in a tab already\r\n4. Do \"Goto Definition\"\r\n5. 
The newly opened tab should have a lower drive letter\r\n\r\n**Expected behavior**\r\n\r\nThe drive letter should be uppercase.\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Win10 21H1 x64\r\n- Sublime Text version: 4109\r\n- LSP version: 4070-1.6.1\r\n- Language servers used: intelephense, pyright\r\n\r\n**Additional context**\r\n\r\nThis is a Windows-only issue as it's case-insensitive.\r\n\n", "before_files": [{"content": "from .typing import Any, Tuple\nfrom urllib.parse import quote\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nfrom urllib.request import pathname2url\nfrom urllib.request import url2pathname\nimport os\nimport re\n\nimport sublime\n\n\ndef filename_to_uri(file_name: str) -> str:\n \"\"\"\n Convert a file name obtained from view.file_name() into an URI\n \"\"\"\n prefix = sublime.installed_packages_path()\n if file_name.startswith(prefix):\n return _to_resource_uri(file_name, prefix)\n prefix = sublime.packages_path()\n if file_name.startswith(prefix) and not os.path.exists(file_name):\n return _to_resource_uri(file_name, prefix)\n path = pathname2url(file_name)\n re.sub(r\"^([A-Z]):/\", _lowercase_driveletter, path)\n return urljoin(\"file:\", path)\n\n\ndef view_to_uri(view: sublime.View) -> str:\n file_name = view.file_name()\n if not file_name:\n return \"buffer://sublime/{}\".format(view.buffer_id())\n return filename_to_uri(file_name)\n\n\ndef uri_to_filename(uri: str) -> str:\n \"\"\"\n DEPRECATED: An URI associated to a view does not necessarily have a \"file:\" scheme.\n Use urllib.parse.urlparse to determine the scheme and go from there.\n Use urllib.parse.unquote to unquote the path.\n \"\"\"\n parsed = urlparse(uri)\n assert parsed.scheme == \"file\"\n if os.name == 'nt':\n # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)\n return url2pathname(parsed.path).strip('\\\\')\n else:\n return url2pathname(parsed.path)\n\n\ndef parse_uri(uri: str) -> Tuple[str, str]:\n \"\"\"\n Parses an URI into a tuple where the first element is the URI scheme. 
The\n second element is the local filesystem path if the URI is a file URI,\n otherwise the second element is the original URI.\n \"\"\"\n parsed = urlparse(uri)\n if parsed.scheme == \"file\":\n if os.name == 'nt':\n # TODO: this is wrong for UNC paths\n return parsed.scheme, url2pathname(parsed.path).strip('\\\\')\n return parsed.scheme, url2pathname(parsed.path)\n return parsed.scheme, uri\n\n\ndef _to_resource_uri(path: str, prefix: str) -> str:\n \"\"\"\n Terrible hacks from ST core leak into packages as well.\n\n See: https://github.com/sublimehq/sublime_text/issues/3742\n \"\"\"\n return \"res://Packages{}\".format(quote(path[len(prefix):]))\n\n\ndef _lowercase_driveletter(match: Any) -> str:\n \"\"\"\n For compatibility with certain other language clients.\n \"\"\"\n return \"{}:/\".format(match.group(1).lower())\n", "path": "plugin/core/url.py"}], "after_files": [{"content": "from .typing import Any, Tuple\nfrom urllib.parse import quote\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nfrom urllib.request import pathname2url\nfrom urllib.request import url2pathname\nimport os\nimport re\n\nimport sublime\n\n\ndef filename_to_uri(file_name: str) -> str:\n \"\"\"\n Convert a file name obtained from view.file_name() into an URI\n \"\"\"\n prefix = sublime.installed_packages_path()\n if file_name.startswith(prefix):\n return _to_resource_uri(file_name, prefix)\n prefix = sublime.packages_path()\n if file_name.startswith(prefix) and not os.path.exists(file_name):\n return _to_resource_uri(file_name, prefix)\n path = pathname2url(file_name)\n return urljoin(\"file:\", path)\n\n\ndef view_to_uri(view: sublime.View) -> str:\n file_name = view.file_name()\n if not file_name:\n return \"buffer://sublime/{}\".format(view.buffer_id())\n return filename_to_uri(file_name)\n\n\ndef uri_to_filename(uri: str) -> str:\n \"\"\"\n DEPRECATED: An URI associated to a view does not necessarily have a \"file:\" scheme.\n Use urllib.parse.urlparse to determine the scheme and go from there.\n Use urllib.parse.unquote to unquote the path.\n \"\"\"\n parsed = urlparse(uri)\n assert parsed.scheme == \"file\"\n if os.name == 'nt':\n # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)\n path = url2pathname(parsed.path).strip('\\\\')\n return re.sub(r\"^([a-z]):\", _uppercase_driveletter, path)\n else:\n return url2pathname(parsed.path)\n\n\ndef parse_uri(uri: str) -> Tuple[str, str]:\n \"\"\"\n Parses an URI into a tuple where the first element is the URI scheme. The\n second element is the local filesystem path if the URI is a file URI,\n otherwise the second element is the original URI.\n \"\"\"\n parsed = urlparse(uri)\n if parsed.scheme == \"file\":\n if os.name == 'nt':\n # TODO: this is wrong for UNC paths\n return parsed.scheme, url2pathname(parsed.path).strip('\\\\')\n return parsed.scheme, url2pathname(parsed.path)\n return parsed.scheme, uri\n\n\ndef _to_resource_uri(path: str, prefix: str) -> str:\n \"\"\"\n Terrible hacks from ST core leak into packages as well.\n\n See: https://github.com/sublimehq/sublime_text/issues/3742\n \"\"\"\n return \"res://Packages{}\".format(quote(path[len(prefix):]))\n\n\ndef _uppercase_driveletter(match: Any) -> str:\n \"\"\"\n For compatibility with Sublime's VCS status in the status bar.\n \"\"\"\n return \"{}:\".format(match.group(1).upper())\n", "path": "plugin/core/url.py"}]} | 1,336 | 321 |
gh_patches_debug_2968 | rasdani/github-patches | git_diff | ibis-project__ibis-2426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix bigquery version
https://dev.azure.com/ibis-project/ibis/_build/results?buildId=3396&view=logs&j=8f09edc2-e3b7-52de-126a-0225c4f3efa1&t=78a72aec-b398-558e-7c0d-2d33604b9e53
I think we need to limit the upper bound of the bigquery library here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """Ibis setup module."""
3 import pathlib
4 import sys
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10 LONG_DESCRIPTION = """
11 Ibis is a productivity-centric Python big data framework.
12
13 See http://ibis-project.org
14 """
15
16 VERSION = sys.version_info.major, sys.version_info.minor
17
18 impala_requires = ['hdfs>=2.0.16', 'sqlalchemy>=1.1,<1.3.7', 'requests']
19 impala_requires.append('impyla[kerberos]>=0.15.0')
20
21 sqlite_requires = ['sqlalchemy>=1.1,<1.3.7']
22 postgres_requires = sqlite_requires + ['psycopg2']
23 mysql_requires = sqlite_requires + ['pymysql']
24
25 omniscidb_requires = ['pymapd>=0.12.0']
26 kerberos_requires = ['requests-kerberos']
27 visualization_requires = ['graphviz']
28 clickhouse_requires = [
29 'clickhouse-driver>=0.1.3',
30 'clickhouse-cityhash',
31 ]
32 bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']
33 hdf5_requires = ['tables>=3.0.0']
34
35 parquet_requires = ['pyarrow>=0.12.0']
36 spark_requires = ['pyspark>=2.4.3']
37
38 geospatial_requires = ['geoalchemy2', 'geopandas', 'shapely']
39
40 all_requires = (
41 impala_requires
42 + postgres_requires
43 + omniscidb_requires
44 + mysql_requires
45 + kerberos_requires
46 + visualization_requires
47 + clickhouse_requires
48 + bigquery_requires
49 + hdf5_requires
50 + parquet_requires
51 + spark_requires
52 + geospatial_requires
53 )
54
55 develop_requires = all_requires + [
56 'black',
57 'click',
58 'pydocstyle==4.0.1',
59 'flake8',
60 'isort',
61 'mypy',
62 'pre-commit',
63 'pygit2',
64 'pytest>=4.5',
65 ]
66
67 install_requires = [
68 line.strip()
69 for line in pathlib.Path(__file__)
70 .parent.joinpath('requirements.txt')
71 .read_text()
72 .splitlines()
73 ]
74
75 setup(
76 name='ibis-framework',
77 url='https://github.com/ibis-project/ibis',
78 packages=find_packages(),
79 version=versioneer.get_version(),
80 cmdclass=versioneer.get_cmdclass(),
81 install_requires=install_requires,
82 python_requires='>=3.7',
83 extras_require={
84 'all': all_requires,
85 'develop': develop_requires,
86 'impala': impala_requires,
87 'kerberos': kerberos_requires,
88 'postgres': postgres_requires,
89 'omniscidb': omniscidb_requires,
90 'mysql': mysql_requires,
91 'sqlite': sqlite_requires,
92 'visualization': visualization_requires,
93 'clickhouse': clickhouse_requires,
94 'bigquery': bigquery_requires,
95 'hdf5': hdf5_requires,
96 'parquet': parquet_requires,
97 'spark': spark_requires,
98 'geospatial': geospatial_requires,
99 },
100 description="Productivity-centric Python Big Data Framework",
101 long_description=LONG_DESCRIPTION,
102 classifiers=[
103 'Development Status :: 4 - Beta',
104 'Operating System :: OS Independent',
105 'Intended Audience :: Science/Research',
106 'Programming Language :: Python',
107 'Programming Language :: Python :: 3',
108 'Topic :: Scientific/Engineering',
109 ],
110 license='Apache License, Version 2.0',
111 maintainer="Phillip Cloud",
112 maintainer_email="[email protected]",
113 )
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,10 @@
'clickhouse-driver>=0.1.3',
'clickhouse-cityhash',
]
-bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']
+bigquery_requires = [
+ 'google-cloud-bigquery[bqstorage,pandas]>=1.12.0,<2.0.0dev',
+ 'pydata-google-auth',
+]
hdf5_requires = ['tables>=3.0.0']
parquet_requires = ['pyarrow>=0.12.0']
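A quick, hypothetical sanity check of the pinned range from this diff using the `packaging` library; it is not part of the fix itself.
```python
# Sketch: confirms which versions the new constraint admits.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.12.0,<2.0.0dev")  # range taken from the diff above
print(spec.contains("1.24.0"))  # True  -- 1.x releases remain installable
print(spec.contains("2.0.0"))   # False -- the 2.x line is now excluded
```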
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,10 @@\n 'clickhouse-driver>=0.1.3',\n 'clickhouse-cityhash',\n ]\n-bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']\n+bigquery_requires = [\n+ 'google-cloud-bigquery[bqstorage,pandas]>=1.12.0,<2.0.0dev',\n+ 'pydata-google-auth',\n+]\n hdf5_requires = ['tables>=3.0.0']\n \n parquet_requires = ['pyarrow>=0.12.0']\n", "issue": "fix bigquery version\nhttps://dev.azure.com/ibis-project/ibis/_build/results?buildId=3396&view=logs&j=8f09edc2-e3b7-52de-126a-0225c4f3efa1&t=78a72aec-b398-558e-7c0d-2d33604b9e53\r\n\r\nI think we need to limit the upper bound of bigquery library here.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Ibis setup module.\"\"\"\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy>=1.1,<1.3.7', 'requests']\nimpala_requires.append('impyla[kerberos]>=0.15.0')\n\nsqlite_requires = ['sqlalchemy>=1.1,<1.3.7']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nomniscidb_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = [\n 'clickhouse-driver>=0.1.3',\n 'clickhouse-cityhash',\n]\nbigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nparquet_requires = ['pyarrow>=0.12.0']\nspark_requires = ['pyspark>=2.4.3']\n\ngeospatial_requires = ['geoalchemy2', 'geopandas', 'shapely']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + omniscidb_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n + spark_requires\n + geospatial_requires\n)\n\ndevelop_requires = all_requires + [\n 'black',\n 'click',\n 'pydocstyle==4.0.1',\n 'flake8',\n 'isort',\n 'mypy',\n 'pre-commit',\n 'pygit2',\n 'pytest>=4.5',\n]\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.7',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'omniscidb': omniscidb_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n 'spark': spark_requires,\n 'geospatial': geospatial_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n 
maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Ibis setup module.\"\"\"\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy>=1.1,<1.3.7', 'requests']\nimpala_requires.append('impyla[kerberos]>=0.15.0')\n\nsqlite_requires = ['sqlalchemy>=1.1,<1.3.7']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nomniscidb_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = [\n 'clickhouse-driver>=0.1.3',\n 'clickhouse-cityhash',\n]\nbigquery_requires = [\n 'google-cloud-bigquery[bqstorage,pandas]>=1.12.0,<2.0.0dev',\n 'pydata-google-auth',\n]\nhdf5_requires = ['tables>=3.0.0']\n\nparquet_requires = ['pyarrow>=0.12.0']\nspark_requires = ['pyspark>=2.4.3']\n\ngeospatial_requires = ['geoalchemy2', 'geopandas', 'shapely']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + omniscidb_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n + spark_requires\n + geospatial_requires\n)\n\ndevelop_requires = all_requires + [\n 'black',\n 'click',\n 'pydocstyle==4.0.1',\n 'flake8',\n 'isort',\n 'mypy',\n 'pre-commit',\n 'pygit2',\n 'pytest>=4.5',\n]\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.7',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'omniscidb': omniscidb_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n 'spark': spark_requires,\n 'geospatial': geospatial_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": "setup.py"}]} | 1,408 | 148 |
gh_patches_debug_65084 | rasdani/github-patches | git_diff | cupy__cupy-1837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bundle header files for fp16
CUDA 9.2 or later allows redistribution of `cuda_fp16.h` and `cuda_fp16.hpp`.
https://docs.nvidia.com/cuda/archive/9.2/eula/#attachment-a
Let's bundle them into the repository and use them to avoid `CUDA_PATH`-based header discovery at runtime.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup
5 import sys
6
7 import cupy_setup_build
8
9
10 if sys.version_info[:3] == (3, 5, 0):
11 if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):
12 msg = """
13 CuPy does not work with Python 3.5.0.
14
15 We strongly recommend to use another version of Python.
16 If you want to use CuPy with Python 3.5.0 at your own risk,
17 set 1 to CUPY_PYTHON_350_FORCE environment variable."""
18 print(msg)
19 sys.exit(1)
20
21
22 requirements = {
23 'setup': [
24 'fastrlock>=0.3',
25 ],
26 'install': [
27 'numpy>=1.9.0',
28 'six>=1.9.0',
29 'fastrlock>=0.3',
30 ],
31 'stylecheck': [
32 'autopep8==1.3.5',
33 'flake8==3.5.0',
34 'pbr==4.0.4',
35 'pycodestyle==2.3.1',
36 ],
37 'test': [
38 'pytest',
39 'mock',
40 ],
41 'doctest': [
42 'matplotlib',
43 'theano',
44 ],
45 'docs': [
46 'sphinx',
47 'sphinx_rtd_theme',
48 ],
49 'travis': [
50 '-r stylecheck',
51 '-r docs',
52 ],
53 'appveyor': [
54 '-r test',
55 ],
56 }
57
58
59 def reduce_requirements(key):
60 # Resolve recursive requirements notation (-r)
61 reqs = requirements[key]
62 resolved_reqs = []
63 for req in reqs:
64 if req.startswith('-r'):
65 depend_key = req[2:].lstrip()
66 reduce_requirements(depend_key)
67 resolved_reqs += requirements[depend_key]
68 else:
69 resolved_reqs.append(req)
70 requirements[key] = resolved_reqs
71
72
73 for k in requirements.keys():
74 reduce_requirements(k)
75
76
77 extras_require = {k: v for k, v in requirements.items() if k != 'install'}
78
79
80 setup_requires = requirements['setup']
81 install_requires = requirements['install']
82 tests_require = requirements['test']
83
84
85 package_data = {
86 'cupy': [
87 'core/include/cupy/complex/arithmetic.h',
88 'core/include/cupy/complex/catrig.h',
89 'core/include/cupy/complex/catrigf.h',
90 'core/include/cupy/complex/ccosh.h',
91 'core/include/cupy/complex/ccoshf.h',
92 'core/include/cupy/complex/cexp.h',
93 'core/include/cupy/complex/cexpf.h',
94 'core/include/cupy/complex/clog.h',
95 'core/include/cupy/complex/clogf.h',
96 'core/include/cupy/complex/complex.h',
97 'core/include/cupy/complex/complex_inl.h',
98 'core/include/cupy/complex/cpow.h',
99 'core/include/cupy/complex/cproj.h',
100 'core/include/cupy/complex/csinh.h',
101 'core/include/cupy/complex/csinhf.h',
102 'core/include/cupy/complex/csqrt.h',
103 'core/include/cupy/complex/csqrtf.h',
104 'core/include/cupy/complex/ctanh.h',
105 'core/include/cupy/complex/ctanhf.h',
106 'core/include/cupy/complex/math_private.h',
107 'core/include/cupy/carray.cuh',
108 'core/include/cupy/complex.cuh',
109 'core/include/cupy/atomics.cuh',
110 'cuda/cupy_thrust.cu',
111 ],
112 }
113
114 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs()
115
116 package_name = cupy_setup_build.get_package_name()
117 long_description = cupy_setup_build.get_long_description()
118 ext_modules = cupy_setup_build.get_ext_modules()
119 build_ext = cupy_setup_build.custom_build_ext
120 sdist = cupy_setup_build.sdist_with_cython
121
122 here = os.path.abspath(os.path.dirname(__file__))
123 # Get __version__ variable
124 exec(open(os.path.join(here, 'cupy', '_version.py')).read())
125
126 setup(
127 name=package_name,
128 version=__version__, # NOQA
129 description='CuPy: NumPy-like API accelerated with CUDA',
130 long_description=long_description,
131 author='Seiya Tokui',
132 author_email='[email protected]',
133 url='https://docs-cupy.chainer.org/',
134 license='MIT License',
135 packages=[
136 'cupy',
137 'cupy.binary',
138 'cupy.core',
139 'cupy.creation',
140 'cupy.cuda',
141 'cupy.cuda.memory_hooks',
142 'cupy.ext',
143 'cupy.fft',
144 'cupy.indexing',
145 'cupy.io',
146 'cupy.linalg',
147 'cupy.logic',
148 'cupy.manipulation',
149 'cupy.math',
150 'cupy.padding',
151 'cupy.prof',
152 'cupy.random',
153 'cupy.sorting',
154 'cupy.sparse',
155 'cupy.sparse.linalg',
156 'cupy.statistics',
157 'cupy.testing',
158 'cupyx',
159 'cupyx.scipy',
160 'cupyx.scipy.ndimage',
161 'cupyx.scipy.sparse',
162 'cupyx.scipy.sparse.linalg',
163 'cupyx.scipy.special',
164 'cupyx.scipy.linalg',
165 'cupyx.linalg',
166 'cupyx.linalg.sparse'
167 ],
168 package_data=package_data,
169 zip_safe=False,
170 setup_requires=setup_requires,
171 install_requires=install_requires,
172 tests_require=tests_require,
173 extras_require=extras_require,
174 ext_modules=ext_modules,
175 cmdclass={'build_ext': build_ext,
176 'sdist': sdist},
177 )
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -107,6 +107,8 @@
'core/include/cupy/carray.cuh',
'core/include/cupy/complex.cuh',
'core/include/cupy/atomics.cuh',
+ 'core/include/cupy/_cuda/cuda-*/*.h',
+ 'core/include/cupy/_cuda/cuda-*/*.hpp',
'cuda/cupy_thrust.cu',
],
}
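A small, self-contained illustration of what the new wildcard `package_data` entries are meant to match; the concrete directory name (`cuda-9.2`) and file names are assumptions based on the issue text, not paths confirmed by the record.
```python
# Sketch: expands the new globs against a throwaway directory tree.
import glob
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    hdr_dir = os.path.join(root, "core/include/cupy/_cuda/cuda-9.2")
    os.makedirs(hdr_dir)
    for name in ("cuda_fp16.h", "cuda_fp16.hpp"):  # headers named in the issue
        open(os.path.join(hdr_dir, name), "w").close()
    for pattern in ("core/include/cupy/_cuda/cuda-*/*.h",
                    "core/include/cupy/_cuda/cuda-*/*.hpp"):
        print(sorted(glob.glob(os.path.join(root, pattern))))
```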
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,6 +107,8 @@\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n+ 'core/include/cupy/_cuda/cuda-*/*.h',\n+ 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n }\n", "issue": "Bundle header files for fp16\nCUDA 9.2 or later allows redistribution of `cuda_fp16.h` and `cuda_fp16.hpp`.\r\nhttps://docs.nvidia.com/cuda/archive/9.2/eula/#attachment-a\r\n\r\nLet's bundle them into the repository and use it to avoid `CUDA_PATH`-based header discovery at runtime.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.9.0',\n 'six>=1.9.0',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest',\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = 
os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://docs-cupy.chainer.org/',\n license='MIT License',\n packages=[\n 'cupy',\n 'cupy.binary',\n 'cupy.core',\n 'cupy.creation',\n 'cupy.cuda',\n 'cupy.cuda.memory_hooks',\n 'cupy.ext',\n 'cupy.fft',\n 'cupy.indexing',\n 'cupy.io',\n 'cupy.linalg',\n 'cupy.logic',\n 'cupy.manipulation',\n 'cupy.math',\n 'cupy.padding',\n 'cupy.prof',\n 'cupy.random',\n 'cupy.sorting',\n 'cupy.sparse',\n 'cupy.sparse.linalg',\n 'cupy.statistics',\n 'cupy.testing',\n 'cupyx',\n 'cupyx.scipy',\n 'cupyx.scipy.ndimage',\n 'cupyx.scipy.sparse',\n 'cupyx.scipy.sparse.linalg',\n 'cupyx.scipy.special',\n 'cupyx.scipy.linalg',\n 'cupyx.linalg',\n 'cupyx.linalg.sparse'\n ],\n package_data=package_data,\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.9.0',\n 'six>=1.9.0',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest',\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 
'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://docs-cupy.chainer.org/',\n license='MIT License',\n packages=[\n 'cupy',\n 'cupy.binary',\n 'cupy.core',\n 'cupy.creation',\n 'cupy.cuda',\n 'cupy.cuda.memory_hooks',\n 'cupy.ext',\n 'cupy.fft',\n 'cupy.indexing',\n 'cupy.io',\n 'cupy.linalg',\n 'cupy.logic',\n 'cupy.manipulation',\n 'cupy.math',\n 'cupy.padding',\n 'cupy.prof',\n 'cupy.random',\n 'cupy.sorting',\n 'cupy.sparse',\n 'cupy.sparse.linalg',\n 'cupy.statistics',\n 'cupy.testing',\n 'cupyx',\n 'cupyx.scipy',\n 'cupyx.scipy.ndimage',\n 'cupyx.scipy.sparse',\n 'cupyx.scipy.sparse.linalg',\n 'cupyx.scipy.special',\n 'cupyx.scipy.linalg',\n 'cupyx.linalg',\n 'cupyx.linalg.sparse'\n ],\n package_data=package_data,\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}]} | 2,022 | 111 |
gh_patches_debug_28757 | rasdani/github-patches | git_diff | WordPress__openverse-api-1083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add database connectivity to healthcheck endpoint
## Problem
<!-- Describe a problem solved by this feature; or delete the section entirely. -->
The healthcheck endpoint should check that the database is accessible. If the db is inaccessible, the service is definitively not healthy.
## Description
<!-- Describe the feature and how it solves the problem. -->
Add another check (in addition to the ES check) for database connectivity. Calling `django.db.connection.ensure_connection()` should be sufficient. It raises an error when the database connection is unavailable.
## Alternatives
<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->
## Additional context
<!-- Add any other context about the feature here; or delete the section entirely. -->
<!-- If you would like to work on this, please comment below separately. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/views/health_views.py`
Content:
```
1 from django.conf import settings
2 from rest_framework import status
3 from rest_framework.exceptions import APIException
4 from rest_framework.request import Request
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8
9 class ElasticsearchHealthcheckException(APIException):
10 status_code = status.HTTP_503_SERVICE_UNAVAILABLE
11
12
13 class HealthCheck(APIView):
14 """
15 Return a "200 OK" response if the server is running normally, 503 otherwise.
16
17 This endpoint is used in production to ensure that the server should receive
18 traffic. If no response is provided, the server is deregistered from the
19 load balancer and destroyed.
20 """
21
22 swagger_schema = None
23
24 def _check_es(self) -> Response | None:
25 """Check ES cluster health and raise an exception if ES is not healthy."""
26
27 es_health = settings.ES.cluster.health(timeout="5s")
28
29 if es_health["timed_out"]:
30 raise ElasticsearchHealthcheckException("es_timed_out")
31
32 if (status := es_health["status"]) != "green":
33 raise ElasticsearchHealthcheckException(f"es_status_{status}")
34
35 def get(self, request: Request):
36 if "check_es" in request.query_params:
37 self._check_es()
38
39 return Response({"status": "200 OK"}, status=200)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/catalog/api/views/health_views.py b/api/catalog/api/views/health_views.py
--- a/api/catalog/api/views/health_views.py
+++ b/api/catalog/api/views/health_views.py
@@ -1,4 +1,5 @@
from django.conf import settings
+from django.db import connection
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.request import Request
@@ -21,19 +22,33 @@
swagger_schema = None
- def _check_es(self) -> Response | None:
- """Check ES cluster health and raise an exception if ES is not healthy."""
+ @staticmethod
+ def _check_db() -> None:
+ """
+ Check that the database is available.
+ Returns nothing if everything is OK, throws error otherwise.
+ """
+ connection.ensure_connection()
+
+ @staticmethod
+ def _check_es() -> None:
+ """
+ Check Elasticsearch cluster health.
+
+ Raises an exception if ES is not healthy.
+ """
es_health = settings.ES.cluster.health(timeout="5s")
if es_health["timed_out"]:
raise ElasticsearchHealthcheckException("es_timed_out")
- if (status := es_health["status"]) != "green":
- raise ElasticsearchHealthcheckException(f"es_status_{status}")
+ if (es_status := es_health["status"]) != "green":
+ raise ElasticsearchHealthcheckException(f"es_status_{es_status}")
def get(self, request: Request):
if "check_es" in request.query_params:
self._check_es()
+ self._check_db()
return Response({"status": "200 OK"}, status=200)
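One way to exercise the patched endpoint (a hypothetical smoke test: the route path and a reachable test database are assumptions, not part of the patch):
```python
# Assumes DRF's test client and that the view is routed at "/".
from rest_framework.test import APIClient


def test_healthcheck_includes_db_check():
    client = APIClient()
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"status": "200 OK"}
```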
| {"golden_diff": "diff --git a/api/catalog/api/views/health_views.py b/api/catalog/api/views/health_views.py\n--- a/api/catalog/api/views/health_views.py\n+++ b/api/catalog/api/views/health_views.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.db import connection\n from rest_framework import status\n from rest_framework.exceptions import APIException\n from rest_framework.request import Request\n@@ -21,19 +22,33 @@\n \n swagger_schema = None\n \n- def _check_es(self) -> Response | None:\n- \"\"\"Check ES cluster health and raise an exception if ES is not healthy.\"\"\"\n+ @staticmethod\n+ def _check_db() -> None:\n+ \"\"\"\n+ Check that the database is available.\n \n+ Returns nothing if everything is OK, throws error otherwise.\n+ \"\"\"\n+ connection.ensure_connection()\n+\n+ @staticmethod\n+ def _check_es() -> None:\n+ \"\"\"\n+ Check Elasticsearch cluster health.\n+\n+ Raises an exception if ES is not healthy.\n+ \"\"\"\n es_health = settings.ES.cluster.health(timeout=\"5s\")\n \n if es_health[\"timed_out\"]:\n raise ElasticsearchHealthcheckException(\"es_timed_out\")\n \n- if (status := es_health[\"status\"]) != \"green\":\n- raise ElasticsearchHealthcheckException(f\"es_status_{status}\")\n+ if (es_status := es_health[\"status\"]) != \"green\":\n+ raise ElasticsearchHealthcheckException(f\"es_status_{es_status}\")\n \n def get(self, request: Request):\n if \"check_es\" in request.query_params:\n self._check_es()\n+ self._check_db()\n \n return Response({\"status\": \"200 OK\"}, status=200)\n", "issue": "Add database connectivity to healthcheck endpoint\n## Problem\r\n\r\n<!-- Describe a problem solved by this feature; or delete the section entirely. -->\r\nThe healtcheck endpoint should check that the database is accessible. If the db is inaccessible, the service is definitively not healthy.\r\n\r\n## Description\r\n\r\n<!-- Describe the feature and how it solves the problem. -->\r\nAdd another check (in addition to the ES check) for the database connectivity. Calling `django.db.connection.ensure_connection()` should be sufficient. It raises an error when the database connection is unavailable.\r\n\r\n## Alternatives\r\n\r\n<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the feature here; or delete the section entirely. -->\r\n\r\n<!-- If you would like to work on this, please comment below separately. -->\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass ElasticsearchHealthcheckException(APIException):\n status_code = status.HTTP_503_SERVICE_UNAVAILABLE\n\n\nclass HealthCheck(APIView):\n \"\"\"\n Return a \"200 OK\" response if the server is running normally, 503 otherwise.\n\n This endpoint is used in production to ensure that the server should receive\n traffic. 
If no response is provided, the server is deregistered from the\n load balancer and destroyed.\n \"\"\"\n\n swagger_schema = None\n\n def _check_es(self) -> Response | None:\n \"\"\"Check ES cluster health and raise an exception if ES is not healthy.\"\"\"\n\n es_health = settings.ES.cluster.health(timeout=\"5s\")\n\n if es_health[\"timed_out\"]:\n raise ElasticsearchHealthcheckException(\"es_timed_out\")\n\n if (status := es_health[\"status\"]) != \"green\":\n raise ElasticsearchHealthcheckException(f\"es_status_{status}\")\n\n def get(self, request: Request):\n if \"check_es\" in request.query_params:\n self._check_es()\n\n return Response({\"status\": \"200 OK\"}, status=200)\n", "path": "api/catalog/api/views/health_views.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db import connection\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass ElasticsearchHealthcheckException(APIException):\n status_code = status.HTTP_503_SERVICE_UNAVAILABLE\n\n\nclass HealthCheck(APIView):\n \"\"\"\n Return a \"200 OK\" response if the server is running normally, 503 otherwise.\n\n This endpoint is used in production to ensure that the server should receive\n traffic. If no response is provided, the server is deregistered from the\n load balancer and destroyed.\n \"\"\"\n\n swagger_schema = None\n\n @staticmethod\n def _check_db() -> None:\n \"\"\"\n Check that the database is available.\n\n Returns nothing if everything is OK, throws error otherwise.\n \"\"\"\n connection.ensure_connection()\n\n @staticmethod\n def _check_es() -> None:\n \"\"\"\n Check Elasticsearch cluster health.\n\n Raises an exception if ES is not healthy.\n \"\"\"\n es_health = settings.ES.cluster.health(timeout=\"5s\")\n\n if es_health[\"timed_out\"]:\n raise ElasticsearchHealthcheckException(\"es_timed_out\")\n\n if (es_status := es_health[\"status\"]) != \"green\":\n raise ElasticsearchHealthcheckException(f\"es_status_{es_status}\")\n\n def get(self, request: Request):\n if \"check_es\" in request.query_params:\n self._check_es()\n self._check_db()\n\n return Response({\"status\": \"200 OK\"}, status=200)\n", "path": "api/catalog/api/views/health_views.py"}]} | 791 | 380 |
gh_patches_debug_40029 | rasdani/github-patches | git_diff | watchdogpolska__small_eod-919 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incomplete list of API endpoints at /api
At `/api` (e.g. https://dev.small-eod.siecobywatelska.pl/api/ ) we do not have a complete list of API endpoints. The complete list is available through ReDoc, e.g. at https://dev.small-eod.siecobywatelska.pl/api/redoc/ .
We should fix this, because it risks giving a misleading impression of the API's scope.
--- END ISSUE ---
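To make the report concrete: DRF's `DefaultRouter` builds its API root view only from viewsets registered on the router, so anything wired up with a plain `include()` never shows up at `/api/`. A standalone illustration (app and viewset names are placeholders, not small_eod code):
```python
from django.urls import include, path
from rest_framework import routers

router = routers.DefaultRouter()
# router.register(r"tags", TagViewSet)   # registered -> listed at /api/

urlpatterns = [
    path("api/", include("myapp.urls")),  # include()d -> invisible at /api/
    path("api/", include(router.urls)),   # root lists registry entries only
]
```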
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend-project/config/urls.py`
Content:
```
1 """small_eod URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/3.0/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 from django.conf import settings
17 from django.conf.urls.static import static
18 from django.contrib import admin
19 from django.urls import include, path, re_path
20 from drf_yasg2.views import get_schema_view
21 from rest_framework import permissions, routers
22
23 from small_eod.channels.views import ChannelViewSet
24 from small_eod.events.views import EventViewSet
25 from small_eod.institutions.views import InstitutionViewSet
26 from small_eod.notes.views import NoteViewSet
27 from small_eod.tags.views import TagViewSet
28 from small_eod.users.views import UserViewSet
29
30 from .swagger import info
31
32 router = routers.DefaultRouter()
33 router.register(r"channels", ChannelViewSet)
34 router.register(r"events", EventViewSet)
35 router.register(r"institutions", InstitutionViewSet)
36 router.register(r"notes", NoteViewSet)
37 router.register(r"tags", TagViewSet)
38 router.register(r"users", UserViewSet)
39
40 schema_view = get_schema_view(
41 info,
42 # validators=['flex', 'ssv'],
43 public=True,
44 permission_classes=(permissions.AllowAny,),
45 )
46
47 urlpatterns = [
48 path("admin/", admin.site.urls),
49 path("api/", include("small_eod.collections.urls")),
50 path("api/", include("small_eod.cases.urls")),
51 path("api/", include("small_eod.letters.urls")),
52 path("api/", include("small_eod.features.urls")),
53 path("api/", include("small_eod.administrative_units.urls")),
54 path("api/", include("small_eod.autocomplete.urls")),
55 path("api/docs/", schema_view.with_ui("swagger"), name="api_docs"),
56 path("api/redoc/", schema_view.with_ui("redoc"), name="api_redocs"),
57 re_path(
58 "^api/swagger(?P<format>.json|.yaml)$",
59 schema_view.without_ui(),
60 name="schema_swagger",
61 ),
62 path("api/", include(router.urls)),
63 ]
64
65
66 if settings.DEBUG:
67 import debug_toolbar
68
69 urlpatterns += [
70 path("__debug__/", include(debug_toolbar.urls)),
71 ]
72
73 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
74 urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend-project/config/urls.py b/backend-project/config/urls.py
--- a/backend-project/config/urls.py
+++ b/backend-project/config/urls.py
@@ -13,6 +13,9 @@
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
+
+import re
+
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
@@ -29,13 +32,56 @@
from .swagger import info
-router = routers.DefaultRouter()
+
+class BetterDefaultRouter(routers.DefaultRouter):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.include_urls = []
+ self.api_root_dict = {}
+
+ def get_urls(self):
+ urls = super().get_urls()
+ urls.extend(self.include_urls)
+ return urls
+
+ def include(self, module):
+ urlpatterns = getattr(include(module)[0], "urlpatterns")
+ viewnames = set()
+ for urlpattern in urlpatterns:
+ self.include_urls.append(urlpattern)
+ if hasattr(urlpattern, "url_patterns"):
+ viewnames.update([pattern.name for pattern in urlpattern.url_patterns])
+ elif hasattr(urlpattern, "name"):
+ viewnames.add(urlpattern.name)
+ self.api_root_dict.update(
+ {re.sub(r"-list$", "", viewname): viewname for viewname in viewnames}
+ )
+
+ def get_api_root_view(self, api_urls=None):
+ api_root_dict = {}
+ list_name = self.routes[0].name
+
+ for prefix, viewset, basename in self.registry:
+ api_root_dict[prefix] = list_name.format(basename=basename)
+ api_root_dict.update(self.api_root_dict)
+
+ return self.APIRootView.as_view(api_root_dict=api_root_dict)
+
+
+router = BetterDefaultRouter()
+
router.register(r"channels", ChannelViewSet)
router.register(r"events", EventViewSet)
router.register(r"institutions", InstitutionViewSet)
router.register(r"notes", NoteViewSet)
router.register(r"tags", TagViewSet)
router.register(r"users", UserViewSet)
+router.include("small_eod.cases.urls")
+router.include("small_eod.features.urls")
+router.include("small_eod.collections.urls")
+router.include("small_eod.letters.urls")
+router.include("small_eod.administrative_units.urls")
+router.include("small_eod.autocomplete.urls")
schema_view = get_schema_view(
info,
@@ -46,12 +92,6 @@
urlpatterns = [
path("admin/", admin.site.urls),
- path("api/", include("small_eod.collections.urls")),
- path("api/", include("small_eod.cases.urls")),
- path("api/", include("small_eod.letters.urls")),
- path("api/", include("small_eod.features.urls")),
- path("api/", include("small_eod.administrative_units.urls")),
- path("api/", include("small_eod.autocomplete.urls")),
path("api/docs/", schema_view.with_ui("swagger"), name="api_docs"),
path("api/redoc/", schema_view.with_ui("redoc"), name="api_redocs"),
re_path(
@@ -62,7 +102,6 @@
path("api/", include(router.urls)),
]
-
if settings.DEBUG:
import debug_toolbar
| {"golden_diff": "diff --git a/backend-project/config/urls.py b/backend-project/config/urls.py\n--- a/backend-project/config/urls.py\n+++ b/backend-project/config/urls.py\n@@ -13,6 +13,9 @@\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n \"\"\"\n+\n+import re\n+\n from django.conf import settings\n from django.conf.urls.static import static\n from django.contrib import admin\n@@ -29,13 +32,56 @@\n \n from .swagger import info\n \n-router = routers.DefaultRouter()\n+\n+class BetterDefaultRouter(routers.DefaultRouter):\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.include_urls = []\n+ self.api_root_dict = {}\n+\n+ def get_urls(self):\n+ urls = super().get_urls()\n+ urls.extend(self.include_urls)\n+ return urls\n+\n+ def include(self, module):\n+ urlpatterns = getattr(include(module)[0], \"urlpatterns\")\n+ viewnames = set()\n+ for urlpattern in urlpatterns:\n+ self.include_urls.append(urlpattern)\n+ if hasattr(urlpattern, \"url_patterns\"):\n+ viewnames.update([pattern.name for pattern in urlpattern.url_patterns])\n+ elif hasattr(urlpattern, \"name\"):\n+ viewnames.add(urlpattern.name)\n+ self.api_root_dict.update(\n+ {re.sub(r\"-list$\", \"\", viewname): viewname for viewname in viewnames}\n+ )\n+\n+ def get_api_root_view(self, api_urls=None):\n+ api_root_dict = {}\n+ list_name = self.routes[0].name\n+\n+ for prefix, viewset, basename in self.registry:\n+ api_root_dict[prefix] = list_name.format(basename=basename)\n+ api_root_dict.update(self.api_root_dict)\n+\n+ return self.APIRootView.as_view(api_root_dict=api_root_dict)\n+\n+\n+router = BetterDefaultRouter()\n+\n router.register(r\"channels\", ChannelViewSet)\n router.register(r\"events\", EventViewSet)\n router.register(r\"institutions\", InstitutionViewSet)\n router.register(r\"notes\", NoteViewSet)\n router.register(r\"tags\", TagViewSet)\n router.register(r\"users\", UserViewSet)\n+router.include(\"small_eod.cases.urls\")\n+router.include(\"small_eod.features.urls\")\n+router.include(\"small_eod.collections.urls\")\n+router.include(\"small_eod.letters.urls\")\n+router.include(\"small_eod.administrative_units.urls\")\n+router.include(\"small_eod.autocomplete.urls\")\n \n schema_view = get_schema_view(\n info,\n@@ -46,12 +92,6 @@\n \n urlpatterns = [\n path(\"admin/\", admin.site.urls),\n- path(\"api/\", include(\"small_eod.collections.urls\")),\n- path(\"api/\", include(\"small_eod.cases.urls\")),\n- path(\"api/\", include(\"small_eod.letters.urls\")),\n- path(\"api/\", include(\"small_eod.features.urls\")),\n- path(\"api/\", include(\"small_eod.administrative_units.urls\")),\n- path(\"api/\", include(\"small_eod.autocomplete.urls\")),\n path(\"api/docs/\", schema_view.with_ui(\"swagger\"), name=\"api_docs\"),\n path(\"api/redoc/\", schema_view.with_ui(\"redoc\"), name=\"api_redocs\"),\n re_path(\n@@ -62,7 +102,6 @@\n path(\"api/\", include(router.urls)),\n ]\n \n-\n if settings.DEBUG:\n import debug_toolbar\n", "issue": "Niekompletny wykaz endpoint\u00f3w API w /api\nNa `/api` (np. https://dev.small-eod.siecobywatelska.pl/api/ ) nie mamy kompletnego wykazu endpoint\u00f3w API. Kompletny jest dost\u0119pny przez ReDoc np. na https://dev.small-eod.siecobywatelska.pl/api/redoc/ .\r\n\r\nPowinni\u015bmy to naprawi\u0107, bo wprowadza ryzyko mylnego wra\u017cenia co do zakresu API.\n", "before_files": [{"content": "\"\"\"small_eod URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom drf_yasg2.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom small_eod.channels.views import ChannelViewSet\nfrom small_eod.events.views import EventViewSet\nfrom small_eod.institutions.views import InstitutionViewSet\nfrom small_eod.notes.views import NoteViewSet\nfrom small_eod.tags.views import TagViewSet\nfrom small_eod.users.views import UserViewSet\n\nfrom .swagger import info\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"channels\", ChannelViewSet)\nrouter.register(r\"events\", EventViewSet)\nrouter.register(r\"institutions\", InstitutionViewSet)\nrouter.register(r\"notes\", NoteViewSet)\nrouter.register(r\"tags\", TagViewSet)\nrouter.register(r\"users\", UserViewSet)\n\nschema_view = get_schema_view(\n info,\n # validators=['flex', 'ssv'],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/\", include(\"small_eod.collections.urls\")),\n path(\"api/\", include(\"small_eod.cases.urls\")),\n path(\"api/\", include(\"small_eod.letters.urls\")),\n path(\"api/\", include(\"small_eod.features.urls\")),\n path(\"api/\", include(\"small_eod.administrative_units.urls\")),\n path(\"api/\", include(\"small_eod.autocomplete.urls\")),\n path(\"api/docs/\", schema_view.with_ui(\"swagger\"), name=\"api_docs\"),\n path(\"api/redoc/\", schema_view.with_ui(\"redoc\"), name=\"api_redocs\"),\n re_path(\n \"^api/swagger(?P<format>.json|.yaml)$\",\n schema_view.without_ui(),\n name=\"schema_swagger\",\n ),\n path(\"api/\", include(router.urls)),\n]\n\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n ]\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)\n", "path": "backend-project/config/urls.py"}], "after_files": [{"content": "\"\"\"small_eod URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nimport re\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom drf_yasg2.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom small_eod.channels.views import ChannelViewSet\nfrom small_eod.events.views import EventViewSet\nfrom small_eod.institutions.views import InstitutionViewSet\nfrom small_eod.notes.views import NoteViewSet\nfrom small_eod.tags.views import TagViewSet\nfrom small_eod.users.views import UserViewSet\n\nfrom .swagger import info\n\n\nclass BetterDefaultRouter(routers.DefaultRouter):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.include_urls = []\n self.api_root_dict = {}\n\n def get_urls(self):\n urls = super().get_urls()\n urls.extend(self.include_urls)\n return urls\n\n def include(self, module):\n urlpatterns = getattr(include(module)[0], \"urlpatterns\")\n viewnames = set()\n for urlpattern in urlpatterns:\n self.include_urls.append(urlpattern)\n if hasattr(urlpattern, \"url_patterns\"):\n viewnames.update([pattern.name for pattern in urlpattern.url_patterns])\n elif hasattr(urlpattern, \"name\"):\n viewnames.add(urlpattern.name)\n self.api_root_dict.update(\n {re.sub(r\"-list$\", \"\", viewname): viewname for viewname in viewnames}\n )\n\n def get_api_root_view(self, api_urls=None):\n api_root_dict = {}\n list_name = self.routes[0].name\n\n for prefix, viewset, basename in self.registry:\n api_root_dict[prefix] = list_name.format(basename=basename)\n api_root_dict.update(self.api_root_dict)\n\n return self.APIRootView.as_view(api_root_dict=api_root_dict)\n\n\nrouter = BetterDefaultRouter()\n\nrouter.register(r\"channels\", ChannelViewSet)\nrouter.register(r\"events\", EventViewSet)\nrouter.register(r\"institutions\", InstitutionViewSet)\nrouter.register(r\"notes\", NoteViewSet)\nrouter.register(r\"tags\", TagViewSet)\nrouter.register(r\"users\", UserViewSet)\nrouter.include(\"small_eod.cases.urls\")\nrouter.include(\"small_eod.features.urls\")\nrouter.include(\"small_eod.collections.urls\")\nrouter.include(\"small_eod.letters.urls\")\nrouter.include(\"small_eod.administrative_units.urls\")\nrouter.include(\"small_eod.autocomplete.urls\")\n\nschema_view = get_schema_view(\n info,\n # validators=['flex', 'ssv'],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/docs/\", schema_view.with_ui(\"swagger\"), name=\"api_docs\"),\n path(\"api/redoc/\", schema_view.with_ui(\"redoc\"), name=\"api_redocs\"),\n re_path(\n \"^api/swagger(?P<format>.json|.yaml)$\",\n schema_view.without_ui(),\n name=\"schema_swagger\",\n ),\n path(\"api/\", include(router.urls)),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n ]\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)\n", "path": "backend-project/config/urls.py"}]} | 1,132 | 773 |
gh_patches_debug_12882 | rasdani/github-patches | git_diff | open-mmlab__mmpretrain-147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError: 'LinearHead is not in the head registry'
use config
```python
model = dict(
head=dict(
type='LinearHead',
num_classes=1000,
in_channels=2048,
loss=dict(
type='LabelSmoothLoss',
loss_weight=1.0,
label_smooth_val=0.1,
num_classes=1000),
))
```
got traceback
```python
Traceback (most recent call last):
File "/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py", line 177, in <module>
main()
File "/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py", line 151, in main
model = build_classifier(cfg.model)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 38, in build_classifier
return build(cfg, CLASSIFIERS)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 18, in build
return build_from_cfg(cfg, registry, default_args)
File "/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py", line 171, in build_from_cfg
return obj_cls(**args)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/classifiers/image.py", line 18, in __init__
self.head = build_head(head)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 26, in build_head
return build(cfg, HEADS)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 18, in build
return build_from_cfg(cfg, registry, default_args)
File "/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py", line 164, in build_from_cfg
f'{obj_type} is not in the {registry.name} registry')
KeyError: 'LinearHead is not in the head registry'
```
__Checked /mmcls/models/heads/*.py; no `LinearHead` is registered there.__
--- END ISSUE ---
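The failure mode is easy to reproduce with a plain dictionary standing in for mmcv's `Registry` (this sketch is not mmcv's real implementation; the registered name is illustrative):
```python
HEADS = {"LinearClsHead": object}  # stand-in for the real head registry


def build_head(cfg: dict):
    obj_type = cfg["type"]
    if obj_type not in HEADS:
        raise KeyError(f"{obj_type} is not in the head registry")
    return HEADS[obj_type]


build_head({"type": "LinearHead"})  # raises KeyError, matching the traceback
```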
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py`
Content:
```
1 _base_ = ['./resnet50_batch2048_warmup.py']
2 model = dict(
3 head=dict(
4 type='LinearHead',
5 num_classes=1000,
6 in_channels=2048,
7 loss=dict(
8 type='LabelSmoothLoss',
9 loss_weight=1.0,
10 label_smooth_val=0.1,
11 num_classes=1000),
12 ))
13
```
Path: `configs/resnet/resnet50_b32x8_label_smooth_imagenet.py`
Content:
```
1 _base_ = ['./resnet50_imagenet_bs256.py']
2 model = dict(
3 head=dict(
4 type='LinearHead',
5 num_classes=1000,
6 in_channels=2048,
7 loss=dict(
8 type='LabelSmoothLoss',
9 loss_weight=1.0,
10 label_smooth_val=0.1,
11 num_classes=1000),
12 ))
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
--- a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
+++ b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
@@ -1,7 +1,7 @@
_base_ = ['./resnet50_imagenet_bs256.py']
model = dict(
head=dict(
- type='LinearHead',
+ type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(
diff --git a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
--- a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
+++ b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
@@ -1,7 +1,7 @@
_base_ = ['./resnet50_batch2048_warmup.py']
model = dict(
head=dict(
- type='LinearHead',
+ type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(
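Before editing configs, the registered names can be double-checked directly; the import path below follows the traceback in the issue and should be treated as an assumption about the installed mmcls version:
```python
# mmcv Registry objects list their registered entries in repr, so this
# prints the valid head type strings (e.g. 'LinearClsHead').
from mmcls.models.builder import HEADS

print(HEADS)
```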
| {"golden_diff": "diff --git a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n--- a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n+++ b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n@@ -1,7 +1,7 @@\n _base_ = ['./resnet50_imagenet_bs256.py']\n model = dict(\n head=dict(\n- type='LinearHead',\n+ type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\ndiff --git a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n--- a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n+++ b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n@@ -1,7 +1,7 @@\n _base_ = ['./resnet50_batch2048_warmup.py']\n model = dict(\n head=dict(\n- type='LinearHead',\n+ type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n", "issue": "KeyError: 'LinearHead is not in the head registry'\nuse config\r\n```python\r\nmodel = dict(\r\n head=dict(\r\n type='LinearHead',\r\n num_classes=1000,\r\n in_channels=2048,\r\n loss=dict(\r\n type='LabelSmoothLoss',\r\n loss_weight=1.0,\r\n label_smooth_val=0.1,\r\n num_classes=1000),\r\n ))\r\n```\r\n\r\ngot trackback\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py\", line 177, in <module>\r\n main()\r\n File \"/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py\", line 151, in main\r\n model = build_classifier(cfg.model)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 38, in build_classifier\r\n return build(cfg, CLASSIFIERS)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 18, in build\r\n return build_from_cfg(cfg, registry, default_args)\r\n File \"/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py\", line 171, in build_from_cfg\r\n return obj_cls(**args)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/classifiers/image.py\", line 18, in __init__\r\n self.head = build_head(head)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 26, in build_head\r\n return build(cfg, HEADS)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 18, in build\r\n return build_from_cfg(cfg, registry, default_args)\r\n File \"/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py\", line 164, in build_from_cfg\r\n f'{obj_type} is not in the {registry.name} registry')\r\nKeyError: 'LinearHead is not in the head registry'\r\n```\r\n\r\n__check /mmcls/models/heads/*.py, not exist `LinearHead` registered__\n", "before_files": [{"content": "_base_ = ['./resnet50_batch2048_warmup.py']\nmodel = dict(\n head=dict(\n type='LinearHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py"}, {"content": "_base_ = ['./resnet50_imagenet_bs256.py']\nmodel = dict(\n head=dict(\n type='LinearHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b32x8_label_smooth_imagenet.py"}], "after_files": [{"content": "_base_ = ['./resnet50_batch2048_warmup.py']\nmodel = dict(\n head=dict(\n 
type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py"}, {"content": "_base_ = ['./resnet50_imagenet_bs256.py']\nmodel = dict(\n head=dict(\n type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b32x8_label_smooth_imagenet.py"}]} | 1,016 | 339 |
gh_patches_debug_24780 | rasdani/github-patches | git_diff | apache__airflow-15109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make Docs builds fallback in case external docs sources are missing
Every now and then our docs builds start to fail because of an external dependency (latest example here #14985). And while we are now caching that information, it does not help when the initial retrieval fails. This information does not change often, but with the number of dependencies we have it will continue to fail regularly, simply because many of those dependencies are not very reliable - they are just web pages hosted somewhere. They are nowhere near the stability of even PyPI or Apt sources, and we have no mirroring in case of problems.
Maybe we could
a) see if we can use some kind of mirroring scheme (do those sites have mirrors?)
b) if not, simply write a simple script that will dump the cached content for those to S3, refresh it in the scheduled (nightly) master CI builds, and have a fallback mechanism to download that from there in case of any problems in CI?
--- END ISSUE ---
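Option (b) could look roughly like the following (a sketch only: the function name and mirror URL handling are hypothetical, and only the primary-then-fallback idea comes from the report):
```python
import requests


def fetch_with_fallback(session: requests.Session, primary_url: str, mirror_url: str) -> bytes:
    # Try the flaky upstream first, then the S3 mirror refreshed by CI.
    for url in (primary_url, mirror_url):
        try:
            response = session.get(url, timeout=30)
            if response.ok:
                return response.content
        except requests.RequestException:
            continue  # this source failed; try the next one
    raise RuntimeError(f"all sources failed for {primary_url}")
```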
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/exts/docs_build/fetch_inventories.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 import concurrent
19 import concurrent.futures
20 import datetime
21 import os
22 import shutil
23 from itertools import repeat
24 from typing import Iterator, List, Tuple
25
26 import requests
27 from requests.adapters import DEFAULT_POOLSIZE
28
29 from airflow.utils.helpers import partition
30 from docs.exts.docs_build.docs_builder import ( # pylint: disable=no-name-in-module
31 get_available_providers_packages,
32 )
33 from docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module
34 THIRD_PARTY_INDEXES,
35 )
36
37 CURRENT_DIR = os.path.dirname(__file__)
38 ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir, os.pardir))
39 DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
40 CACHE_DIR = os.path.join(DOCS_DIR, '_inventory_cache')
41 EXPIRATION_DATE_PATH = os.path.join(DOCS_DIR, '_inventory_cache', "expiration-date")
42
43 S3_DOC_URL = "http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com"
44 S3_DOC_URL_VERSIONED = S3_DOC_URL + "/docs/{package_name}/latest/objects.inv"
45 S3_DOC_URL_NON_VERSIONED = S3_DOC_URL + "/docs/{package_name}/objects.inv"
46
47
48 def _fetch_file(session: requests.Session, package_name: str, url: str, path: str) -> Tuple[str, bool]:
49 """
50 Download a file and returns status information as a tuple with package
51 name and success status(bool value).
52 """
53 response = session.get(url, allow_redirects=True, stream=True)
54 if not response.ok:
55 print(f"Failed to fetch inventory: {url}")
56 return package_name, False
57
58 os.makedirs(os.path.dirname(path), exist_ok=True)
59 with open(path, 'wb') as f:
60 response.raw.decode_content = True
61 shutil.copyfileobj(response.raw, f)
62 print(f"Fetched inventory: {url}")
63 return package_name, True
64
65
66 def _is_outdated(path: str):
67 if not os.path.exists(path):
68 return True
69 delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(path))
70 return delta > datetime.timedelta(hours=12)
71
72
73 def fetch_inventories():
74 """Fetch all inventories for Airflow documentation packages and store in cache."""
75 os.makedirs(os.path.dirname(CACHE_DIR), exist_ok=True)
76 to_download: List[Tuple[str, str, str]] = []
77
78 for pkg_name in get_available_providers_packages():
79 to_download.append(
80 (
81 pkg_name,
82 S3_DOC_URL_VERSIONED.format(package_name=pkg_name),
83 f'{CACHE_DIR}/{pkg_name}/objects.inv',
84 )
85 )
86 to_download.append(
87 (
88 "apache-airflow",
89 S3_DOC_URL_VERSIONED.format(package_name='apache-airflow'),
90 f'{CACHE_DIR}/apache-airflow/objects.inv',
91 )
92 )
93 for pkg_name in ['apache-airflow-providers', 'docker-stack']:
94 to_download.append(
95 (
96 pkg_name,
97 S3_DOC_URL_NON_VERSIONED.format(package_name=pkg_name),
98 f'{CACHE_DIR}/{pkg_name}/objects.inv',
99 )
100 )
101 to_download.extend(
102 (
103 pkg_name,
104 f"{doc_url}/objects.inv",
105 f'{CACHE_DIR}/{pkg_name}/objects.inv',
106 )
107 for pkg_name, doc_url in THIRD_PARTY_INDEXES.items()
108 )
109
110 to_download = [(pkg_name, url, path) for pkg_name, url, path in to_download if _is_outdated(path)]
111 if not to_download:
112 print("Nothing to do")
113 return []
114
115 print(f"To download {len(to_download)} inventorie(s)")
116
117 with requests.Session() as session, concurrent.futures.ThreadPoolExecutor(DEFAULT_POOLSIZE) as pool:
118 download_results: Iterator[Tuple[str, bool]] = pool.map(
119 _fetch_file,
120 repeat(session, len(to_download)),
121 (pkg_name for pkg_name, _, _ in to_download),
122 (url for _, url, _ in to_download),
123 (path for _, _, path in to_download),
124 )
125 failed, success = partition(lambda d: d[1], download_results)
126 failed, success = list(failed), list(success)
127 print(f"Result: {len(success)} success, {len(failed)} failed")
128 if failed:
129 print("Failed packages:")
130 for pkg_no, (pkg_name, _) in enumerate(failed, start=1):
131 print(f"{pkg_no}. {pkg_name}")
132
133 return [pkg_name for pkg_name, status in failed]
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/exts/docs_build/fetch_inventories.py b/docs/exts/docs_build/fetch_inventories.py
--- a/docs/exts/docs_build/fetch_inventories.py
+++ b/docs/exts/docs_build/fetch_inventories.py
@@ -20,10 +20,13 @@
import datetime
import os
import shutil
+import sys
+import traceback
from itertools import repeat
from typing import Iterator, List, Tuple
import requests
+import urllib3.exceptions
from requests.adapters import DEFAULT_POOLSIZE
from airflow.utils.helpers import partition
@@ -50,9 +53,15 @@
Download a file and returns status information as a tuple with package
name and success status(bool value).
"""
- response = session.get(url, allow_redirects=True, stream=True)
+ try:
+ response = session.get(url, allow_redirects=True, stream=True)
+ except (requests.RequestException, urllib3.exceptions.HTTPError):
+ print(f"Failed to fetch inventory: {url}")
+ traceback.print_exc(file=sys.stderr)
+ return package_name, False
if not response.ok:
print(f"Failed to fetch inventory: {url}")
+ print(f"Failed with status: {response.status_code}", file=sys.stderr)
return package_name, False
os.makedirs(os.path.dirname(path), exist_ok=True)
| {"golden_diff": "diff --git a/docs/exts/docs_build/fetch_inventories.py b/docs/exts/docs_build/fetch_inventories.py\n--- a/docs/exts/docs_build/fetch_inventories.py\n+++ b/docs/exts/docs_build/fetch_inventories.py\n@@ -20,10 +20,13 @@\n import datetime\n import os\n import shutil\n+import sys\n+import traceback\n from itertools import repeat\n from typing import Iterator, List, Tuple\n \n import requests\n+import urllib3.exceptions\n from requests.adapters import DEFAULT_POOLSIZE\n \n from airflow.utils.helpers import partition\n@@ -50,9 +53,15 @@\n Download a file and returns status information as a tuple with package\n name and success status(bool value).\n \"\"\"\n- response = session.get(url, allow_redirects=True, stream=True)\n+ try:\n+ response = session.get(url, allow_redirects=True, stream=True)\n+ except (requests.RequestException, urllib3.exceptions.HTTPError):\n+ print(f\"Failed to fetch inventory: {url}\")\n+ traceback.print_exc(file=sys.stderr)\n+ return package_name, False\n if not response.ok:\n print(f\"Failed to fetch inventory: {url}\")\n+ print(f\"Failed with status: {response.status_code}\", file=sys.stderr)\n return package_name, False\n \n os.makedirs(os.path.dirname(path), exist_ok=True)\n", "issue": "Make Docs builds fallback in case external docs sources are missing\nEvery now and then our docs builds start to fail because of external dependency (latest example here #14985). And while we are doing caching now of that information, it does not help when the initial retrieval fails. This information does not change often but with the number of dependencies we have it will continue to fail regularly simply because many of those depenencies are not very reliable - they are just a web page hosted somewhere. They are nowhere near the stabilty of even PyPI or Apt sources and we have no mirroring in case of problem.\r\n\r\nMaybe we could \r\n\r\na) see if we can use some kind of mirroring scheme (do those sites have mirrrors ? )\r\nb) if not, simply write a simple script that will dump the cached content for those to S3, refresh it in the CI scheduled (nightly) master builds ad have a fallback mechanism to download that from there in case of any problems in CI?\r\n\r\n \n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport concurrent\nimport concurrent.futures\nimport datetime\nimport os\nimport shutil\nfrom itertools import repeat\nfrom typing import Iterator, List, Tuple\n\nimport requests\nfrom requests.adapters import DEFAULT_POOLSIZE\n\nfrom airflow.utils.helpers import partition\nfrom docs.exts.docs_build.docs_builder import ( # pylint: disable=no-name-in-module\n get_available_providers_packages,\n)\nfrom docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module\n THIRD_PARTY_INDEXES,\n)\n\nCURRENT_DIR = os.path.dirname(__file__)\nROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir, os.pardir))\nDOCS_DIR = os.path.join(ROOT_DIR, 'docs')\nCACHE_DIR = os.path.join(DOCS_DIR, '_inventory_cache')\nEXPIRATION_DATE_PATH = os.path.join(DOCS_DIR, '_inventory_cache', \"expiration-date\")\n\nS3_DOC_URL = \"http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com\"\nS3_DOC_URL_VERSIONED = S3_DOC_URL + \"/docs/{package_name}/latest/objects.inv\"\nS3_DOC_URL_NON_VERSIONED = S3_DOC_URL + \"/docs/{package_name}/objects.inv\"\n\n\ndef _fetch_file(session: requests.Session, package_name: str, url: str, path: str) -> Tuple[str, bool]:\n \"\"\"\n Download a file and returns status information as a tuple with package\n name and success status(bool value).\n \"\"\"\n response = session.get(url, allow_redirects=True, stream=True)\n if not response.ok:\n print(f\"Failed to fetch inventory: {url}\")\n return package_name, False\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'wb') as f:\n response.raw.decode_content = True\n shutil.copyfileobj(response.raw, f)\n print(f\"Fetched inventory: {url}\")\n return package_name, True\n\n\ndef _is_outdated(path: str):\n if not os.path.exists(path):\n return True\n delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(path))\n return delta > datetime.timedelta(hours=12)\n\n\ndef fetch_inventories():\n \"\"\"Fetch all inventories for Airflow documentation packages and store in cache.\"\"\"\n os.makedirs(os.path.dirname(CACHE_DIR), exist_ok=True)\n to_download: List[Tuple[str, str, str]] = []\n\n for pkg_name in get_available_providers_packages():\n to_download.append(\n (\n pkg_name,\n S3_DOC_URL_VERSIONED.format(package_name=pkg_name),\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n )\n to_download.append(\n (\n \"apache-airflow\",\n S3_DOC_URL_VERSIONED.format(package_name='apache-airflow'),\n f'{CACHE_DIR}/apache-airflow/objects.inv',\n )\n )\n for pkg_name in ['apache-airflow-providers', 'docker-stack']:\n to_download.append(\n (\n pkg_name,\n S3_DOC_URL_NON_VERSIONED.format(package_name=pkg_name),\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n )\n to_download.extend(\n (\n pkg_name,\n f\"{doc_url}/objects.inv\",\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n for pkg_name, doc_url in THIRD_PARTY_INDEXES.items()\n )\n\n to_download = [(pkg_name, url, path) for pkg_name, url, path in to_download if _is_outdated(path)]\n if not to_download:\n print(\"Nothing to do\")\n return []\n\n print(f\"To download {len(to_download)} inventorie(s)\")\n\n with requests.Session() as session, concurrent.futures.ThreadPoolExecutor(DEFAULT_POOLSIZE) as pool:\n download_results: Iterator[Tuple[str, bool]] = pool.map(\n _fetch_file,\n repeat(session, len(to_download)),\n (pkg_name for pkg_name, _, _ in to_download),\n (url for _, url, _ in to_download),\n (path for _, _, path in 
to_download),\n )\n failed, success = partition(lambda d: d[1], download_results)\n failed, success = list(failed), list(success)\n print(f\"Result: {len(success)} success, {len(failed)} failed\")\n if failed:\n print(\"Failed packages:\")\n for pkg_no, (pkg_name, _) in enumerate(failed, start=1):\n print(f\"{pkg_no}. {pkg_name}\")\n\n return [pkg_name for pkg_name, status in failed]\n", "path": "docs/exts/docs_build/fetch_inventories.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport concurrent\nimport concurrent.futures\nimport datetime\nimport os\nimport shutil\nimport sys\nimport traceback\nfrom itertools import repeat\nfrom typing import Iterator, List, Tuple\n\nimport requests\nimport urllib3.exceptions\nfrom requests.adapters import DEFAULT_POOLSIZE\n\nfrom airflow.utils.helpers import partition\nfrom docs.exts.docs_build.docs_builder import ( # pylint: disable=no-name-in-module\n get_available_providers_packages,\n)\nfrom docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module\n THIRD_PARTY_INDEXES,\n)\n\nCURRENT_DIR = os.path.dirname(__file__)\nROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir, os.pardir))\nDOCS_DIR = os.path.join(ROOT_DIR, 'docs')\nCACHE_DIR = os.path.join(DOCS_DIR, '_inventory_cache')\nEXPIRATION_DATE_PATH = os.path.join(DOCS_DIR, '_inventory_cache', \"expiration-date\")\n\nS3_DOC_URL = \"http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com\"\nS3_DOC_URL_VERSIONED = S3_DOC_URL + \"/docs/{package_name}/latest/objects.inv\"\nS3_DOC_URL_NON_VERSIONED = S3_DOC_URL + \"/docs/{package_name}/objects.inv\"\n\n\ndef _fetch_file(session: requests.Session, package_name: str, url: str, path: str) -> Tuple[str, bool]:\n \"\"\"\n Download a file and returns status information as a tuple with package\n name and success status(bool value).\n \"\"\"\n try:\n response = session.get(url, allow_redirects=True, stream=True)\n except (requests.RequestException, urllib3.exceptions.HTTPError):\n print(f\"Failed to fetch inventory: {url}\")\n traceback.print_exc(file=sys.stderr)\n return package_name, False\n if not response.ok:\n print(f\"Failed to fetch inventory: {url}\")\n print(f\"Failed with status: {response.status_code}\", file=sys.stderr)\n return package_name, False\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'wb') as f:\n response.raw.decode_content = True\n shutil.copyfileobj(response.raw, f)\n print(f\"Fetched inventory: {url}\")\n return package_name, True\n\n\ndef _is_outdated(path: str):\n if not os.path.exists(path):\n return True\n delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(path))\n return delta > datetime.timedelta(hours=12)\n\n\ndef 
fetch_inventories():\n \"\"\"Fetch all inventories for Airflow documentation packages and store in cache.\"\"\"\n os.makedirs(os.path.dirname(CACHE_DIR), exist_ok=True)\n to_download: List[Tuple[str, str, str]] = []\n\n for pkg_name in get_available_providers_packages():\n to_download.append(\n (\n pkg_name,\n S3_DOC_URL_VERSIONED.format(package_name=pkg_name),\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n )\n to_download.append(\n (\n \"apache-airflow\",\n S3_DOC_URL_VERSIONED.format(package_name='apache-airflow'),\n f'{CACHE_DIR}/apache-airflow/objects.inv',\n )\n )\n for pkg_name in ['apache-airflow-providers', 'docker-stack']:\n to_download.append(\n (\n pkg_name,\n S3_DOC_URL_NON_VERSIONED.format(package_name=pkg_name),\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n )\n to_download.extend(\n (\n pkg_name,\n f\"{doc_url}/objects.inv\",\n f'{CACHE_DIR}/{pkg_name}/objects.inv',\n )\n for pkg_name, doc_url in THIRD_PARTY_INDEXES.items()\n )\n\n to_download = [(pkg_name, url, path) for pkg_name, url, path in to_download if _is_outdated(path)]\n if not to_download:\n print(\"Nothing to do\")\n return []\n\n print(f\"To download {len(to_download)} inventorie(s)\")\n\n with requests.Session() as session, concurrent.futures.ThreadPoolExecutor(DEFAULT_POOLSIZE) as pool:\n download_results: Iterator[Tuple[str, bool]] = pool.map(\n _fetch_file,\n repeat(session, len(to_download)),\n (pkg_name for pkg_name, _, _ in to_download),\n (url for _, url, _ in to_download),\n (path for _, _, path in to_download),\n )\n failed, success = partition(lambda d: d[1], download_results)\n failed, success = list(failed), list(success)\n print(f\"Result: {len(success)} success, {len(failed)} failed\")\n if failed:\n print(\"Failed packages:\")\n for pkg_no, (pkg_name, _) in enumerate(failed, start=1):\n print(f\"{pkg_no}. {pkg_name}\")\n\n return [pkg_name for pkg_name, status in failed]\n", "path": "docs/exts/docs_build/fetch_inventories.py"}]} | 1,927 | 297 |
gh_patches_debug_12800 | rasdani/github-patches | git_diff | mindsdb__mindsdb-712 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError: 'mongodb' on start
Starting Mindsdb (python -m mindsdb) version 2.8.1 throws:
```
Failed to start mongodb API with exception 'mongodb'
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py", line 83, in <module>
p = ctx.Process(target=start_functions[api], args=(config_path, True,))
KeyError: 'mongodb'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py", line 83, in <module>
p = ctx.Process(target=start_functions[api], args=(config_path, True,))
KeyError: 'mongodb'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/__main__.py`
Content:
```
1 import atexit
2 import traceback
3 import sys
4 import os
5
6 import torch.multiprocessing as mp
7
8 from mindsdb_native.config import CONFIG
9
10 from mindsdb.utilities.config import Config
11 from mindsdb.interfaces.native.mindsdb import MindsdbNative
12 from mindsdb.interfaces.custom.custom_models import CustomModels
13 from mindsdb.api.http.start import start as start_http
14 from mindsdb.api.mysql.start import start as start_mysql
15 from mindsdb.api.mongo.start import start as start_mongo
16 from mindsdb.utilities.fs import get_or_create_dir_struct
17 from mindsdb.interfaces.database.database import DatabaseWrapper
18 from mindsdb.utilities.functions import args_parse
19
20
21 def close_api_gracefully(p_arr):
22 for p in p_arr:
23 sys.stdout.flush()
24 p.terminate()
25 p.join()
26 sys.stdout.flush()
27
28
29 if __name__ == '__main__':
30 mp.freeze_support()
31
32 args = args_parse()
33
34 config_path = args.config
35 if config_path is None:
36 config_dir, _ = get_or_create_dir_struct()
37 config_path = os.path.join(config_dir, 'config.json')
38
39 print(f'Using configuration file: {config_path}')
40 config = Config(config_path)
41
42 if args.api is None:
43 api_arr = [api for api in config['api']]
44 else:
45 api_arr = args.api.split(',')
46
47 start_functions = {
48 'http': start_http,
49 'mysql': start_mysql,
50 'mongo': start_mongo
51 }
52
53 mdb = MindsdbNative(config)
54 cst = CustomModels(config)
55 # @TODO Maybe just use `get_model_data` directly here ? Seems like a useless abstraction
56 model_data_arr = [
57 {
58 'name': x['name'],
59 'predict': x['predict'],
60 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']
61 } for x in mdb.get_models()
62 ]
63
64 for m in model_data_arr:
65 if 'columns_to_ignore' in m['data_analysis']:
66 del m['data_analysis']['columns_to_ignore']
67 if 'train_std_dev' in m['data_analysis']:
68 del m['data_analysis']['train_std_dev']
69
70 model_data_arr.extend(cst.get_models())
71
72 dbw = DatabaseWrapper(config)
73 dbw.register_predictors(model_data_arr)
74
75 for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:
76 print(f'Error failed to integrate with database aliased: {broken_name}')
77
78 p_arr = []
79 ctx = mp.get_context('spawn')
80 for api in api_arr:
81 print(f'Starting Mindsdb {api} API !')
82 try:
83 p = ctx.Process(target=start_functions[api], args=(config_path, True,))
84 p.start()
85 p_arr.append(p)
86 print(f'Started Mindsdb {api} API !')
87 except Exception as e:
88 close_api_gracefully(p_arr)
89 print(f'Failed to start {api} API with exception {e}')
90 print(traceback.format_exc())
91 raise
92
93 atexit.register(close_api_gracefully, p_arr=p_arr)
94
95 for p in p_arr:
96 p.join()
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -40,14 +40,20 @@
config = Config(config_path)
if args.api is None:
- api_arr = [api for api in config['api']]
+ api_arr = ['http', 'mysql']
else:
api_arr = args.api.split(',')
+ for api in api_arr:
+ if api not in config:
+ print(f"Trying run '{api}' API, but is no config for this api.")
+ print(f"Please, fill config['api']['{api}']")
+ sys.exit(0)
+
start_functions = {
'http': start_http,
'mysql': start_mysql,
- 'mongo': start_mongo
+ 'mongodb': start_mongo
}
mdb = MindsdbNative(config)
| {"golden_diff": "diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py\n--- a/mindsdb/__main__.py\n+++ b/mindsdb/__main__.py\n@@ -40,14 +40,20 @@\n config = Config(config_path)\n \n if args.api is None:\n- api_arr = [api for api in config['api']]\n+ api_arr = ['http', 'mysql']\n else:\n api_arr = args.api.split(',')\n \n+ for api in api_arr:\n+ if api not in config:\n+ print(f\"Trying run '{api}' API, but is no config for this api.\")\n+ print(f\"Please, fill config['api']['{api}']\")\n+ sys.exit(0)\n+\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n- 'mongo': start_mongo\n+ 'mongodb': start_mongo\n }\n \n mdb = MindsdbNative(config)\n", "issue": "KeyError: 'mongodb' on start\nStarting Mindsdb(python -m mindsdb) version 2.8.1 throws:\r\n\r\n```\r\nFailed to start mongodb API with exception 'mongodb'\r\nTraceback (most recent call last):\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py\", line 83, in <module>\r\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\r\nKeyError: 'mongodb'\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py\", line 83, in <module>\r\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\r\nKeyError: 'mongodb'\r\n```\n", "before_files": [{"content": "import atexit\nimport traceback\nimport sys\nimport os\n\nimport torch.multiprocessing as mp\n\nfrom mindsdb_native.config import CONFIG\n\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.native.mindsdb import MindsdbNative\nfrom mindsdb.interfaces.custom.custom_models import CustomModels\nfrom mindsdb.api.http.start import start as start_http\nfrom mindsdb.api.mysql.start import start as start_mysql\nfrom mindsdb.api.mongo.start import start as start_mongo\nfrom mindsdb.utilities.fs import get_or_create_dir_struct\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.functions import args_parse\n\n\ndef close_api_gracefully(p_arr):\n for p in p_arr:\n sys.stdout.flush()\n p.terminate()\n p.join()\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n mp.freeze_support()\n\n args = args_parse()\n\n config_path = args.config\n if config_path is None:\n config_dir, _ = get_or_create_dir_struct()\n config_path = os.path.join(config_dir, 'config.json')\n\n print(f'Using configuration file: {config_path}')\n config = Config(config_path)\n\n if args.api is None:\n api_arr = [api for api in config['api']]\n else:\n api_arr = args.api.split(',')\n\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n 'mongo': start_mongo\n }\n\n mdb = MindsdbNative(config)\n cst = CustomModels(config)\n # @TODO Maybe just use `get_model_data` directly here ? 
Seems like a useless abstraction\n model_data_arr = [\n {\n 'name': x['name'],\n 'predict': x['predict'],\n 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']\n } for x in mdb.get_models()\n ]\n\n for m in model_data_arr:\n if 'columns_to_ignore' in m['data_analysis']:\n del m['data_analysis']['columns_to_ignore']\n if 'train_std_dev' in m['data_analysis']:\n del m['data_analysis']['train_std_dev']\n\n model_data_arr.extend(cst.get_models())\n\n dbw = DatabaseWrapper(config)\n dbw.register_predictors(model_data_arr)\n\n for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:\n print(f'Error failed to integrate with database aliased: {broken_name}')\n\n p_arr = []\n ctx = mp.get_context('spawn')\n for api in api_arr:\n print(f'Starting Mindsdb {api} API !')\n try:\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\n p.start()\n p_arr.append(p)\n print(f'Started Mindsdb {api} API !')\n except Exception as e:\n close_api_gracefully(p_arr)\n print(f'Failed to start {api} API with exception {e}')\n print(traceback.format_exc())\n raise\n\n atexit.register(close_api_gracefully, p_arr=p_arr)\n\n for p in p_arr:\n p.join()\n", "path": "mindsdb/__main__.py"}], "after_files": [{"content": "import atexit\nimport traceback\nimport sys\nimport os\n\nimport torch.multiprocessing as mp\n\nfrom mindsdb_native.config import CONFIG\n\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.native.mindsdb import MindsdbNative\nfrom mindsdb.interfaces.custom.custom_models import CustomModels\nfrom mindsdb.api.http.start import start as start_http\nfrom mindsdb.api.mysql.start import start as start_mysql\nfrom mindsdb.api.mongo.start import start as start_mongo\nfrom mindsdb.utilities.fs import get_or_create_dir_struct\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.functions import args_parse\n\n\ndef close_api_gracefully(p_arr):\n for p in p_arr:\n sys.stdout.flush()\n p.terminate()\n p.join()\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n mp.freeze_support()\n\n args = args_parse()\n\n config_path = args.config\n if config_path is None:\n config_dir, _ = get_or_create_dir_struct()\n config_path = os.path.join(config_dir, 'config.json')\n\n print(f'Using configuration file: {config_path}')\n config = Config(config_path)\n\n if args.api is None:\n api_arr = ['http', 'mysql']\n else:\n api_arr = args.api.split(',')\n\n for api in api_arr:\n if api not in config:\n print(f\"Trying run '{api}' API, but is no config for this api.\")\n print(f\"Please, fill config['api']['{api}']\")\n sys.exit(0)\n\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n 'mongodb': start_mongo\n }\n\n mdb = MindsdbNative(config)\n cst = CustomModels(config)\n # @TODO Maybe just use `get_model_data` directly here ? 
Seems like a useless abstraction\n model_data_arr = [\n {\n 'name': x['name'],\n 'predict': x['predict'],\n 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']\n } for x in mdb.get_models()\n ]\n\n for m in model_data_arr:\n if 'columns_to_ignore' in m['data_analysis']:\n del m['data_analysis']['columns_to_ignore']\n if 'train_std_dev' in m['data_analysis']:\n del m['data_analysis']['train_std_dev']\n\n model_data_arr.extend(cst.get_models())\n\n dbw = DatabaseWrapper(config)\n dbw.register_predictors(model_data_arr)\n\n for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:\n print(f'Error failed to integrate with database aliased: {broken_name}')\n\n p_arr = []\n ctx = mp.get_context('spawn')\n for api in api_arr:\n print(f'Starting Mindsdb {api} API !')\n try:\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\n p.start()\n p_arr.append(p)\n print(f'Started Mindsdb {api} API !')\n except Exception as e:\n close_api_gracefully(p_arr)\n print(f'Failed to start {api} API with exception {e}')\n print(traceback.format_exc())\n raise\n\n atexit.register(close_api_gracefully, p_arr=p_arr)\n\n for p in p_arr:\n p.join()\n", "path": "mindsdb/__main__.py"}]} | 1,385 | 214 |
gh_patches_debug_4504 | rasdani/github-patches | git_diff | saleor__saleor-2803 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GraphQL query for home page
### What I'm trying to achieve
I want to have a shop homepage which shows:
* new arrivals,
* product in a sale,
* featured products,
* featured collection,
* categories links
### Describe a proposed solution
```graphql
query HomePage {
shop {
featuredCollection {
id
name
}
}
featured: products(first: 10, collectionSlug: "featured") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
newArrivals: products(first: 10, sortBy: "creation_date") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
sales: products(first: 10, collectionSlug: "sales") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
categories {
edges {
node {
id
name
}
}
}
}
```
### Other solutions I've tried and won't work
I introduced:
* filter by collection slug for featured and sales. That is the simplest approach which I have in my mind.
* exposing homepage collection in the shop query,
* sorting products by creation date for new arrivals.
This is only a proposition. If you have a better approach in mind please share it.
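For the sorting half of this proposal, a minimal sketch of what exposing another sort key could look like in `saleor/product/filters.py` is shown below. This is a sketch only; the `updated_at` field name is an assumption about the `Product` model, and the label wording is illustrative.
```python
from collections import OrderedDict

from django.utils.translation import pgettext_lazy

# Hypothetical extension of the existing OrderingFilter choices; the rest of
# ProductFilter stays unchanged and picks these up via SORT_BY_FIELDS.
SORT_BY_FIELDS = OrderedDict([
    ('name', pgettext_lazy('Product list sorting option', 'name')),
    ('price', pgettext_lazy('Product list sorting option', 'price')),
    ('updated_at', pgettext_lazy('Product list sorting option', 'last updated')),
])
```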
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/product/filters.py`
Content:
```
1 from collections import OrderedDict
2
3 from django.db.models import Q
4 from django.forms import CheckboxSelectMultiple, ValidationError
5 from django.utils.translation import pgettext_lazy
6 from django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter
7
8 from ..core.filters import SortedFilterSet
9 from .models import Product, ProductAttribute
10
11 SORT_BY_FIELDS = OrderedDict([
12 ('name', pgettext_lazy('Product list sorting option', 'name')),
13 ('price', pgettext_lazy('Product list sorting option', 'price'))])
14
15
16 class ProductFilter(SortedFilterSet):
17 sort_by = OrderingFilter(
18 label=pgettext_lazy('Product list sorting form', 'Sort by'),
19 fields=SORT_BY_FIELDS.keys(),
20 field_labels=SORT_BY_FIELDS)
21 price = RangeFilter(
22 label=pgettext_lazy('Currency amount', 'Price'))
23
24 class Meta:
25 model = Product
26 fields = []
27
28 def __init__(self, *args, **kwargs):
29 super().__init__(*args, **kwargs)
30 self.product_attributes, self.variant_attributes = (
31 self._get_attributes())
32 self.filters.update(self._get_product_attributes_filters())
33 self.filters.update(self._get_product_variants_attributes_filters())
34 self.filters = OrderedDict(sorted(self.filters.items()))
35
36 def _get_attributes(self):
37 q_product_attributes = self._get_product_attributes_lookup()
38 q_variant_attributes = self._get_variant_attributes_lookup()
39 product_attributes = (
40 ProductAttribute.objects.all()
41 .prefetch_related('translations', 'values__translations')
42 .filter(q_product_attributes)
43 .distinct())
44 variant_attributes = (
45 ProductAttribute.objects.all()
46 .prefetch_related('translations', 'values__translations')
47 .filter(q_variant_attributes)
48 .distinct())
49 return product_attributes, variant_attributes
50
51 def _get_product_attributes_lookup(self):
52 raise NotImplementedError()
53
54 def _get_variant_attributes_lookup(self):
55 raise NotImplementedError()
56
57 def _get_product_attributes_filters(self):
58 filters = {}
59 for attribute in self.product_attributes:
60 filters[attribute.slug] = MultipleChoiceFilter(
61 name='attributes__%s' % attribute.pk,
62 label=attribute.translated.name,
63 widget=CheckboxSelectMultiple,
64 choices=self._get_attribute_choices(attribute))
65 return filters
66
67 def _get_product_variants_attributes_filters(self):
68 filters = {}
69 for attribute in self.variant_attributes:
70 filters[attribute.slug] = MultipleChoiceFilter(
71 name='variants__attributes__%s' % attribute.pk,
72 label=attribute.translated.name,
73 widget=CheckboxSelectMultiple,
74 choices=self._get_attribute_choices(attribute))
75 return filters
76
77 def _get_attribute_choices(self, attribute):
78 return [
79 (choice.pk, choice.translated.name)
80 for choice in attribute.values.all()]
81
82 def validate_sort_by(self, value):
83 if value.strip('-') not in SORT_BY_FIELDS:
84 raise ValidationError(
85 pgettext_lazy(
86 'Validation error for sort_by filter',
87 '%(value)s is not a valid sorting option'),
88 params={'value': value})
89
90
91 class ProductCategoryFilter(ProductFilter):
92 def __init__(self, *args, **kwargs):
93 self.category = kwargs.pop('category')
94 super().__init__(*args, **kwargs)
95
96 def _get_product_attributes_lookup(self):
97 return Q(product_types__products__category=self.category)
98
99 def _get_variant_attributes_lookup(self):
100 return Q(product_variant_types__products__category=self.category)
101
102
103 class ProductCollectionFilter(ProductFilter):
104 def __init__(self, *args, **kwargs):
105 self.collection = kwargs.pop('collection')
106 super().__init__(*args, **kwargs)
107
108 def _get_product_attributes_lookup(self):
109 return Q(product_types__products__collections=self.collection)
110
111 def _get_variant_attributes_lookup(self):
112 return Q(product_variant_types__products__collections=self.collection)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/product/filters.py b/saleor/product/filters.py
--- a/saleor/product/filters.py
+++ b/saleor/product/filters.py
@@ -10,7 +10,9 @@
SORT_BY_FIELDS = OrderedDict([
('name', pgettext_lazy('Product list sorting option', 'name')),
- ('price', pgettext_lazy('Product list sorting option', 'price'))])
+ ('price', pgettext_lazy('Product list sorting option', 'price')),
+ ('updated_at', pgettext_lazy(
+ 'Product list sorting option', 'last updated'))])
class ProductFilter(SortedFilterSet):
| {"golden_diff": "diff --git a/saleor/product/filters.py b/saleor/product/filters.py\n--- a/saleor/product/filters.py\n+++ b/saleor/product/filters.py\n@@ -10,7 +10,9 @@\n \n SORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n- ('price', pgettext_lazy('Product list sorting option', 'price'))])\n+ ('price', pgettext_lazy('Product list sorting option', 'price')),\n+ ('updated_at', pgettext_lazy(\n+ 'Product list sorting option', 'last updated'))])\n \n \n class ProductFilter(SortedFilterSet):\n", "issue": "Grapql query for home page\n### What I'm trying to achieve\r\nI want to have a shop homepage which shows:\r\n* new arrivals,\r\n* product in a sale,\r\n* featured products,\r\n* featured collection,\r\n* categories links\r\n\r\n### Describe a proposed solution\r\n```graphql\r\nquery HomePage {\r\n shop {\r\n featuredCollection {\r\n id\r\n name\r\n }\r\n }\r\n featured: products(first: 10, collectionSlug: \"featured\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n newArrivals: products(first: 10, sortBy: \"creation_date\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n sales: products(first: 10, collectionSlug: \"sales\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n categories {\r\n edges {\r\n node {\r\n id\r\n name\r\n }\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\n### Other solutions I've tried and won't work\r\nI introduced:\r\n* filter by collection slug for featured and sales. That is the simplest approach which I have in my mind.\r\n* exposing homepage collection in the shop query,\r\n* sorting products by creation data for new arrivals.\r\n\r\nThis is only a proposition. 
If you have a better approach in mind please share it.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import Q\nfrom django.forms import CheckboxSelectMultiple, ValidationError\nfrom django.utils.translation import pgettext_lazy\nfrom django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter\n\nfrom ..core.filters import SortedFilterSet\nfrom .models import Product, ProductAttribute\n\nSORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n ('price', pgettext_lazy('Product list sorting option', 'price'))])\n\n\nclass ProductFilter(SortedFilterSet):\n sort_by = OrderingFilter(\n label=pgettext_lazy('Product list sorting form', 'Sort by'),\n fields=SORT_BY_FIELDS.keys(),\n field_labels=SORT_BY_FIELDS)\n price = RangeFilter(\n label=pgettext_lazy('Currency amount', 'Price'))\n\n class Meta:\n model = Product\n fields = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.product_attributes, self.variant_attributes = (\n self._get_attributes())\n self.filters.update(self._get_product_attributes_filters())\n self.filters.update(self._get_product_variants_attributes_filters())\n self.filters = OrderedDict(sorted(self.filters.items()))\n\n def _get_attributes(self):\n q_product_attributes = self._get_product_attributes_lookup()\n q_variant_attributes = self._get_variant_attributes_lookup()\n product_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_product_attributes)\n .distinct())\n variant_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_variant_attributes)\n .distinct())\n return product_attributes, variant_attributes\n\n def _get_product_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_variant_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_product_attributes_filters(self):\n filters = {}\n for attribute in self.product_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_product_variants_attributes_filters(self):\n filters = {}\n for attribute in self.variant_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='variants__attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_attribute_choices(self, attribute):\n return [\n (choice.pk, choice.translated.name)\n for choice in attribute.values.all()]\n\n def validate_sort_by(self, value):\n if value.strip('-') not in SORT_BY_FIELDS:\n raise ValidationError(\n pgettext_lazy(\n 'Validation error for sort_by filter',\n '%(value)s is not a valid sorting option'),\n params={'value': value})\n\n\nclass ProductCategoryFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.category = kwargs.pop('category')\n super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_types__products__category=self.category)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__category=self.category)\n\n\nclass ProductCollectionFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.collection = kwargs.pop('collection')\n 
super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_types__products__collections=self.collection)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__collections=self.collection)\n", "path": "saleor/product/filters.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import Q\nfrom django.forms import CheckboxSelectMultiple, ValidationError\nfrom django.utils.translation import pgettext_lazy\nfrom django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter\n\nfrom ..core.filters import SortedFilterSet\nfrom .models import Product, ProductAttribute\n\nSORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n ('price', pgettext_lazy('Product list sorting option', 'price')),\n ('updated_at', pgettext_lazy(\n 'Product list sorting option', 'last updated'))])\n\n\nclass ProductFilter(SortedFilterSet):\n sort_by = OrderingFilter(\n label=pgettext_lazy('Product list sorting form', 'Sort by'),\n fields=SORT_BY_FIELDS.keys(),\n field_labels=SORT_BY_FIELDS)\n price = RangeFilter(\n label=pgettext_lazy('Currency amount', 'Price'))\n\n class Meta:\n model = Product\n fields = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.product_attributes, self.variant_attributes = (\n self._get_attributes())\n self.filters.update(self._get_product_attributes_filters())\n self.filters.update(self._get_product_variants_attributes_filters())\n self.filters = OrderedDict(sorted(self.filters.items()))\n\n def _get_attributes(self):\n q_product_attributes = self._get_product_attributes_lookup()\n q_variant_attributes = self._get_variant_attributes_lookup()\n product_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_product_attributes)\n .distinct())\n variant_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_variant_attributes)\n .distinct())\n return product_attributes, variant_attributes\n\n def _get_product_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_variant_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_product_attributes_filters(self):\n filters = {}\n for attribute in self.product_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_product_variants_attributes_filters(self):\n filters = {}\n for attribute in self.variant_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='variants__attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_attribute_choices(self, attribute):\n return [\n (choice.pk, choice.translated.name)\n for choice in attribute.values.all()]\n\n def validate_sort_by(self, value):\n if value.strip('-') not in SORT_BY_FIELDS:\n raise ValidationError(\n pgettext_lazy(\n 'Validation error for sort_by filter',\n '%(value)s is not a valid sorting option'),\n params={'value': value})\n\n\nclass ProductCategoryFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.category = kwargs.pop('category')\n super().__init__(*args, **kwargs)\n\n def 
_get_product_attributes_lookup(self):\n return Q(product_types__products__category=self.category)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__category=self.category)\n\n\nclass ProductCollectionFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.collection = kwargs.pop('collection')\n super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_types__products__collections=self.collection)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__collections=self.collection)\n", "path": "saleor/product/filters.py"}]} | 1,659 | 142 |
gh_patches_debug_25279 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error message broken on Zope-Root
http://localhost:8080/foo when `foo` does not exist (i.e. is not a Plone instance) results in a traceback.
In Plone 5.1 we used to get:
```
<h2>Site Error</h2> <p>An error was encountered while publishing this resource. </p> <p><strong>Resource not found</strong></p> Sorry, the requested resource does not exist.<p>Check the URL and try again.</p><p><b>Resource:</b> foo GET</p> <hr noshade="noshade"/> <p>Troubleshooting Suggestions</p> <ul> <li>The URL may be incorrect.</li> <li>The parameters passed to this resource may be incorrect.</li> <li>A resource that this resource relies on may be encountering an error.</li> </ul> <p>For more detailed information about the error, please refer to the error log. </p> <p>If the error persists please contact the site maintainer. Thank you for your patience. </p>
```
That was ugly (escaped html) but the text was correct.
Plone 5.2 on the other hand tries to render the `ExceptionView` and fails since the app has no method `Language()`.
```
Traceback (innermost last):
Module ZServer.ZPublisher.Publish, line 261, in publish_module_standard
Module Products.PDBDebugMode.runcall, line 83, in pdb_publish
Module ZServer.ZPublisher.Publish, line 182, in publish
Module ZServer.ZPublisher.exceptionhook, line 117, in __call__
Module Products.CMFPlone.browser.exceptions, line 49, in __call__
Module Products.Five.browser.pagetemplatefile, line 125, in __call__
Module Products.Five.browser.pagetemplatefile, line 60, in __call__
Module zope.pagetemplate.pagetemplate, line 134, in pt_render
Module Products.PageTemplates.engine, line 85, in __call__
Module z3c.pt.pagetemplate, line 163, in render
Module chameleon.zpt.template, line 261, in render
Module chameleon.template, line 191, in render
Module chameleon.template, line 171, in render
Module 4195113b17720aee65cd4ca2a7e7ba2d.py, line 1095, in render
Module 9fafff3b78c7ea63dcd15308ddf75fb8.py, line 652, in render_master
Module Products.PageTemplates.expression, line 105, in __call__
Module plone.app.layout.globals.portal, line 80, in language
AttributeError: 'RequestContainer' object has no attribute 'Language'
- Expression: "portal_state/language"
- Filename: ... one/Products/CMFPlone/browser/templates/main_template.pt
- Location: (line 12: col 11)
- Source: lang portal_state/language;
^^^^^^^^^^^^^^^^^^^^^
- Arguments:
repeat: {...} (0)
template: <ViewPageTemplateFile - at 0x10da74950>
views: <ViewMapper - at 0x10e47d9d0>
modules: <_SecureModuleImporter - at 0x10867bed0>
args: <tuple - at 0x1066a0050>
here: <ImplicitAcquisitionWrapper at 0x10d34b960>
user: <SpecialUser - at 0x108457290>
nothing: <NoneType - at 0x1065eeeb8>
container: <ImplicitAcquisitionWrapper at 0x10d34b960>
request: <HTTPRequest - at 0x10e5f1c90>
wrapped_repeat: <SafeMapping - at 0x10c50a5f0>
traverse_subpath: <list - at 0x10e88d488>
default: <object - at 0x1066f6ba0>
loop: {...} (0) context: <ImplicitAcquisitionWrapper at 0x10d34b960>
view: <SimpleViewClass from /Users/pbauer/workspace/coredev/src/Products.CMFPlone/Products/CMFPlone/browser/templates/error_message.pt index.html at 0x10c45f9d0>
translate: <function translate at 0x10e59f050>
root: <ImplicitAcquisitionWrapper at 0x10d34b960>
options: {...} (2)
target_language: <NoneType - at 0x1065eeeb8>
```
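One possible shape for hardening the exception view is to bail out to the plain template whenever no Plone site can be resolved for the request. The following is a minimal sketch only; the key assumption is that `zope.component.hooks.getSite` returns `None` for a request against Zope root in this stack.
```python
from zope.component.hooks import getSite

# Hypothetical guard inside ExceptionView.__call__: without a site there is
# no portal_state, so main_template cannot render and we must fall back.
if getSite() is None:
    template = self.basic_template
else:
    template = self.index
```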
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/exceptions.py`
Content:
```
1 from AccessControl import getSecurityManager
2 from Products.Five import BrowserView
3 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
4 from zExceptions.ExceptionFormatter import format_exception
5 import json
6 import sys
7
8
9 class ExceptionView(BrowserView):
10 basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')
11
12 def is_manager(self):
13 return getSecurityManager().checkPermission(
14 'Manage portal', self.context)
15
16 def __call__(self):
17 exception = self.context
18 self.context = self.__parent__
19 request = self.request
20
21 error_type = exception.__class__.__name__
22 exc_type, value, traceback = sys.exc_info()
23 error_tb = ''.join(
24 format_exception(exc_type, value, traceback, as_html=True))
25 request.response.setStatus(exc_type)
26
27 # Indicate exception as JSON
28 if "text/html" not in request.getHeader('Accept', ''):
29 request.response.setHeader("Content-Type", "application/json")
30 return json.dumps({
31 'error_type': error_type,
32 })
33
34 # Use a simplified template if main_template is not available
35 try:
36 self.context.unrestrictedTraverse('main_template')
37 except:
38 template = self.basic_template
39 else:
40 template = self.index
41
42 # Render page with user-facing error notice
43 request.set('disable_border', True)
44 request.set('disable_plone.leftcolumn', True)
45 request.set('disable_plone.rightcolumn', True)
46
47 return template(
48 error_type=error_type,
49 error_tb=error_tb,
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py
--- a/Products/CMFPlone/browser/exceptions.py
+++ b/Products/CMFPlone/browser/exceptions.py
@@ -1,7 +1,10 @@
+# -*- coding: utf-8 -*-
from AccessControl import getSecurityManager
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zExceptions.ExceptionFormatter import format_exception
+from zope.component.hooks import getSite
+
import json
import sys
@@ -31,13 +34,17 @@
'error_type': error_type,
})
- # Use a simplified template if main_template is not available
- try:
- self.context.unrestrictedTraverse('main_template')
- except:
+ if getSite() is None:
+ # We cannot get the site, so we cannot render our nice template
template = self.basic_template
else:
- template = self.index
+ # Use a simplified template if main_template is not available
+ try:
+ self.context.unrestrictedTraverse('main_template')
+ except:
+ template = self.basic_template
+ else:
+ template = self.index
# Render page with user-facing error notice
request.set('disable_border', True)
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py\n--- a/Products/CMFPlone/browser/exceptions.py\n+++ b/Products/CMFPlone/browser/exceptions.py\n@@ -1,7 +1,10 @@\n+# -*- coding: utf-8 -*-\n from AccessControl import getSecurityManager\n from Products.Five import BrowserView\n from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n from zExceptions.ExceptionFormatter import format_exception\n+from zope.component.hooks import getSite\n+\n import json\n import sys\n \n@@ -31,13 +34,17 @@\n 'error_type': error_type,\n })\n \n- # Use a simplified template if main_template is not available\n- try:\n- self.context.unrestrictedTraverse('main_template')\n- except:\n+ if getSite() is None:\n+ # We cannot get the site, so we cannot render our nice template\n template = self.basic_template\n else:\n- template = self.index\n+ # Use a simplified template if main_template is not available\n+ try:\n+ self.context.unrestrictedTraverse('main_template')\n+ except:\n+ template = self.basic_template\n+ else:\n+ template = self.index\n \n # Render page with user-facing error notice\n request.set('disable_border', True)\n", "issue": "Error message broken on Zope-Root\nhttp://localhost:8080/foo when `foo` does not exists (i.e. is not a Plone instance) results in a traceback.\r\n\r\nIn Plone 5.1 we used to get:\r\n```\r\n<h2>Site Error</h2> <p>An error was encountered while publishing this resource. </p> <p><strong>Resource not found</strong></p> Sorry, the requested resource does not exist.<p>Check the URL and try again.</p><p><b>Resource:</b> foo GET</p> <hr noshade=\"noshade\"/> <p>Troubleshooting Suggestions</p> <ul> <li>The URL may be incorrect.</li> <li>The parameters passed to this resource may be incorrect.</li> <li>A resource that this resource relies on may be encountering an error.</li> </ul> <p>For more detailed information about the error, please refer to the error log. </p> <p>If the error persists please contact the site maintainer. Thank you for your patience. </p> \r\n```\r\nThat was ugly (escaped html) but the text was correct.\r\n\r\nPlone 5.2 on the other hand tries to render the `ExceptionView` and fails sind the app has no method `Language()`. \r\n\r\n```\r\nTraceback (innermost last):\r\n\r\n Module ZServer.ZPublisher.Publish, line 261, in publish_module_standard\r\n Module Products.PDBDebugMode.runcall, line 83, in pdb_publish\r\n Module ZServer.ZPublisher.Publish, line 182, in publish\r\n Module ZServer.ZPublisher.exceptionhook, line 117, in __call__\r\n Module Products.CMFPlone.browser.exceptions, line 49, in __call__\r\n Module Products.Five.browser.pagetemplatefile, line 125, in __call__\r\n Module Products.Five.browser.pagetemplatefile, line 60, in __call__\r\n Module zope.pagetemplate.pagetemplate, line 134, in pt_render\r\n Module Products.PageTemplates.engine, line 85, in __call__\r\n Module z3c.pt.pagetemplate, line 163, in render\r\n Module chameleon.zpt.template, line 261, in render\r\n Module chameleon.template, line 191, in render\r\n Module chameleon.template, line 171, in render\r\n Module 4195113b17720aee65cd4ca2a7e7ba2d.py, line 1095, in render\r\n Module 9fafff3b78c7ea63dcd15308ddf75fb8.py, line 652, in render_master\r\n Module Products.PageTemplates.expression, line 105, in __call__\r\n Module plone.app.layout.globals.portal, line 80, in language\r\n\r\nAttributeError: 'RequestContainer' object has no attribute 'Language'\r\n\r\n - Expression: \"portal_state/language\"\r\n - Filename: ... 
one/Products/CMFPlone/browser/templates/main_template.pt\r\n - Location: (line 12: col 11)\r\n - Source: lang portal_state/language; \r\n\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n\r\n - Arguments: \r\n repeat: {...} (0) \r\n template: <ViewPageTemplateFile - at 0x10da74950> \r\n views: <ViewMapper - at 0x10e47d9d0> \r\n modules: <_SecureModuleImporter - at 0x10867bed0> \r\n args: <tuple - at 0x1066a0050> \r\n here: <ImplicitAcquisitionWrapper at 0x10d34b960> \r\n user: <SpecialUser - at 0x108457290> \r\n nothing: <NoneType - at 0x1065eeeb8> \r\n container: <ImplicitAcquisitionWrapper at 0x10d34b960> \r\n request: <HTTPRequest - at 0x10e5f1c90> \r\n wrapped_repeat: <SafeMapping - at 0x10c50a5f0> \r\n traverse_subpath: <list - at 0x10e88d488> \r\n default: <object - at 0x1066f6ba0> \r\n loop: {...} (0) context: <ImplicitAcquisitionWrapper at 0x10d34b960> \r\n view: <SimpleViewClass from /Users/pbauer/workspace/coredev/src/Products.CMFPlone/Products/CMFPlone/browser/templates/error_message.pt index.html at 0x10c45f9d0> \r\n translate: <function translate at 0x10e59f050> \r\n root: <ImplicitAcquisitionWrapper at 0x10d34b960> \r\n options: {...} (2) \r\n target_language: <NoneType - at 0x1065eeeb8>\r\n```\n", "before_files": [{"content": "from AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n # Use a simplified template if main_template is not available\n try:\n self.context.unrestrictedTraverse('main_template')\n except:\n template = self.basic_template\n else:\n template = self.index\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n return template(\n error_type=error_type,\n error_tb=error_tb,\n )\n", "path": "Products/CMFPlone/browser/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\nfrom zope.component.hooks import getSite\n\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n 
format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n if getSite() is None:\n # We cannot get the site, so we cannot render our nice template\n template = self.basic_template\n else:\n # Use a simplified template if main_template is not available\n try:\n self.context.unrestrictedTraverse('main_template')\n except:\n template = self.basic_template\n else:\n template = self.index\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n return template(\n error_type=error_type,\n error_tb=error_tb,\n )\n", "path": "Products/CMFPlone/browser/exceptions.py"}]} | 1,842 | 307 |
gh_patches_debug_29292 | rasdani/github-patches | git_diff | e-valuation__EvaP-721 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only internal redirects
The platform should only redirect to internal pages after logging in.
(handled in `evaluation/views.py index`)
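One way to express that restriction is to accept a `next` target only when it resolves against the project's own URLconf. The following is a sketch only; `django.core.urlresolvers` matches the Django generation this code appears to target (it would be `django.urls` on newer versions), and the helper name is hypothetical.
```python
from django.core.urlresolvers import resolve, Resolver404
from django.shortcuts import redirect

def redirect_if_internal(redirect_to):
    # Hypothetical helper: follow the target only when it maps to one of
    # our own views; silently ignore external or unknown URLs.
    try:
        resolve(redirect_to)
    except Resolver404:
        return None
    return redirect(redirect_to)
```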
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/views.py`
Content:
```
1 from django.contrib import messages
2 from django.contrib.auth import login as auth_login
3 from django.shortcuts import redirect, render
4 from django.utils.translation import ugettext as _
5
6 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
7 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
8
9
10 def index(request):
11 """Main entry page into EvaP providing all the login options available. THe username/password
12 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
13 The login key mechanism is meant to be used to include external participants, e.g. visiting
14 students or visiting contributors.
15 """
16
17 # parse the form data into the respective form
18 submit_type = request.POST.get("submit_type", "no_submit")
19 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
20 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None)
21 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
22
23 # process form data
24 if request.method == 'POST':
25 if new_key_form.is_valid():
26 # user wants a new login key
27 profile = new_key_form.get_user()
28 profile.generate_login_key()
29 profile.save()
30
31 EmailTemplate.send_login_key_to_user(new_key_form.get_user())
32
33 messages.success(request, _("Successfully sent email with new login key."))
34 elif login_key_form.is_valid():
35 # user would like to login with a login key and passed key test
36 auth_login(request, login_key_form.get_user())
37 elif login_username_form.is_valid():
38 # user would like to login with username and password and passed password test
39 auth_login(request, login_username_form.get_user())
40
41 # clean up our test cookie
42 if request.session.test_cookie_worked():
43 request.session.delete_test_cookie()
44
45 # if not logged in by now, render form
46 if not request.user.is_authenticated():
47 # set test cookie to verify whether they work in the next step
48 request.session.set_test_cookie()
49
50 template_data = dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form)
51 return render(request, "index.html", template_data)
52 else:
53 user, created = UserProfile.objects.get_or_create(username=request.user.username)
54
55 # check for redirect variable
56 redirect_to = request.GET.get("next", None)
57 if redirect_to is not None:
58 if redirect_to.startswith("/staff/"):
59 if request.user.is_staff:
60 return redirect(redirect_to)
61 elif redirect_to.startswith("/grades/"):
62 if request.user.is_grade_publisher:
63 return redirect(redirect_to)
64 elif redirect_to.startswith("/contributor/"):
65 if user.is_contributor:
66 return redirect(redirect_to)
67 elif redirect_to.startswith("/student/"):
68 if user.is_participant:
69 return redirect(redirect_to)
70 else:
71 return redirect(redirect_to)
72
73 # redirect user to appropriate start page
74 if request.user.is_staff:
75 return redirect('staff:index')
76 elif request.user.is_grade_publisher:
77 return redirect('grades:semester_view', Semester.active_semester().id)
78 elif user.is_contributor_or_delegate:
79 return redirect('contributor:index')
80 elif user.is_participant:
81 return redirect('student:index')
82 else:
83 return redirect('results:index')
84
85
86 def faq(request):
87 return render(request, "faq.html", dict(sections=FaqSection.objects.all()))
88
89 def legal_notice(request):
90 return render(request, "legal_notice.html", dict())
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -2,13 +2,14 @@
from django.contrib.auth import login as auth_login
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
+from django.core.urlresolvers import resolve, Resolver404
from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
def index(request):
- """Main entry page into EvaP providing all the login options available. THe username/password
+ """Main entry page into EvaP providing all the login options available. The username/password
login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
The login key mechanism is meant to be used to include external participants, e.g. visiting
students or visiting contributors.
@@ -68,7 +69,12 @@
if user.is_participant:
return redirect(redirect_to)
else:
- return redirect(redirect_to)
+ try:
+ resolve(redirect_to)
+ except Resolver404:
+ pass
+ else:
+ return redirect(redirect_to)
# redirect user to appropriate start page
if request.user.is_staff:
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -2,13 +2,14 @@\n from django.contrib.auth import login as auth_login\n from django.shortcuts import redirect, render\n from django.utils.translation import ugettext as _\n+from django.core.urlresolvers import resolve, Resolver404\n \n from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\n from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n \n \n def index(request):\n- \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n+ \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n@@ -68,7 +69,12 @@\n if user.is_participant:\n return redirect(redirect_to)\n else:\n- return redirect(redirect_to)\n+ try:\n+ resolve(redirect_to)\n+ except Resolver404:\n+ pass\n+ else:\n+ return redirect(redirect_to)\n \n # redirect user to appropriate start page\n if request.user.is_staff:\n", "issue": "Only internal redirects\nThe platform should only redirect to internal pages after logging in.\n\n(handled in `evaluation/views.py index`)\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.send_login_key_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated():\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, created = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/staff/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/grades/\"):\n if request.user.is_grade_publisher:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if user.is_contributor:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/student/\"):\n if user.is_participant:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n elif user.is_participant:\n return redirect('student:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n", "path": "evap/evaluation/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import resolve, Resolver404\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. 
by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.send_login_key_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated():\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, created = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/staff/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/grades/\"):\n if request.user.is_grade_publisher:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if user.is_contributor:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/student/\"):\n if user.is_participant:\n return redirect(redirect_to)\n else:\n try:\n resolve(redirect_to)\n except Resolver404:\n pass\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n elif user.is_participant:\n return redirect('student:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n", "path": "evap/evaluation/views.py"}]} | 1,240 | 314 |
gh_patches_debug_4673 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Problem with tesseract after Bugfix: Some tesseract languages aren't detected as installed. @stumpylog (#2057)
### Description
Hi,
after Fixes [2044](https://github.com/paperless-ngx/paperless-ngx/issues/2044) I have a problem with OCR and paperless-ngx.
Before this commit I use next ENV :
>
- PAPERLESS_OCR_LANGUAGE=srp_latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
and everything works.
After this commit, if I don't make any changes in the ENV, the error is:
?: The selected ocr language srp_latn is not installed. Paperless cannot OCR your documents without it. Please fix PAPERLESS_OCR_LANGUAGE.
If I make changes in the ENV, replacing _ with -:
>
- PAPERLESS_OCR_LANGUAGE=srp-latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
After this change the system installs the language and paperless starts, but if I upload any document, OCR doesn't work; the error is:
`[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
**
Paperless-ngx 1.10.0 WORK
Paperless-ngx 1.10.1 DONT WORK
**
### Steps to reproduce
1. Add this ENV
- PAPERLESS_OCR_LANGUAGE=srp-latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
2. Upload any document
### Webserver logs
```bash
[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
Traceback (most recent call last):
File "/usr/src/paperless/src/paperless_tesseract/parsers.py", line 292, in parse
ocrmypdf.ocr(**args)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/api.py", line 331, in ocr
check_options(options, plugin_manager)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 246, in check_options
_check_plugin_options(options, plugin_manager)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 241, in _check_plugin_options
check_options_languages(options, ocr_engine_languages)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 70, in check_options_languages
raise MissingDependencyError(msg)
ocrmypdf.exceptions.MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/src/paperless/src/documents/consumer.py", line 337, in try_consume_file
document_parser.parse(self.path, mime_type, self.filename)
File "/usr/src/paperless/src/paperless_tesseract/parsers.py", line 346, in parse
raise ParseError(f"{e.__class__.__name__}: {str(e)}") from e
documents.parsers.ParseError: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
```
### Browser logs
_No response_
### Paperless-ngx version
1.10.1
### Host OS
Docker
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless_tesseract/checks.py`
Content:
```
1 import shutil
2 import subprocess
3
4 from django.conf import settings
5 from django.core.checks import Error
6 from django.core.checks import register
7 from django.core.checks import Warning
8
9
10 def get_tesseract_langs():
11 proc = subprocess.run(
12 [shutil.which("tesseract"), "--list-langs"],
13 capture_output=True,
14 )
15
16 # Decode bytes to string, split on newlines, trim out the header
17 proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
18
19 # Replace _ with - to convert two part languages to the expected code
20 return [x.replace("_", "-") for x in proc_lines]
21
22
23 @register()
24 def check_default_language_available(app_configs, **kwargs):
25 installed_langs = get_tesseract_langs()
26
27 if not settings.OCR_LANGUAGE:
28 return [
29 Warning(
30 "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. "
31 "This means that tesseract will fallback to english.",
32 ),
33 ]
34
35 specified_langs = settings.OCR_LANGUAGE.split("+")
36
37 for lang in specified_langs:
38 if lang not in installed_langs:
39 return [
40 Error(
41 f"The selected ocr language {lang} is "
42 f"not installed. Paperless cannot OCR your documents "
43 f"without it. Please fix PAPERLESS_OCR_LANGUAGE.",
44 ),
45 ]
46
47 return []
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py
--- a/src/paperless_tesseract/checks.py
+++ b/src/paperless_tesseract/checks.py
@@ -16,8 +16,7 @@
# Decode bytes to string, split on newlines, trim out the header
proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
- # Replace _ with - to convert two part languages to the expected code
- return [x.replace("_", "-") for x in proc_lines]
+ return [x.strip() for x in proc_lines]
@register()
| {"golden_diff": "diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py\n--- a/src/paperless_tesseract/checks.py\n+++ b/src/paperless_tesseract/checks.py\n@@ -16,8 +16,7 @@\n # Decode bytes to string, split on newlines, trim out the header\n proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n \n- # Replace _ with - to convert two part languages to the expected code\n- return [x.replace(\"_\", \"-\") for x in proc_lines]\n+ return [x.strip() for x in proc_lines]\n \n \n @register()\n", "issue": "[BUG] Problem with tesseract after Bugfix: Some tesseract languages aren't detected as installed. @stumpylog (#2057)\n### Description\r\n\r\nHi,\r\nafter Fixes [2044 ](https://github.com/paperless-ngx/paperless-ngx/issues/2044)I have problem with OCR and paperless-ngx.\r\n\r\nBefore this commit I use next ENV :\r\n\r\n> \r\n\r\n - PAPERLESS_OCR_LANGUAGE=srp_latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn \r\n\r\nand everything work.\r\n\r\nAfter this commit if dont make any changes in ENV error is:\r\n?: The selected ocr language srp_latn is not installed. Paperless cannot OCR your documents without it. Please fix PAPERLESS_OCR_LANGUAGE.\r\n\r\nIf i make changes in ENV, replace _ with -:\r\n\r\n> \r\n\r\n - PAPERLESS_OCR_LANGUAGE=srp-latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn\r\nAfter this change system install lang and start paperless, but if I upload any document, OCR dont work, error is:\r\n\r\n`[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\n**\r\nPaperless-ngx 1.10.0 WORK\r\nPaperless-ngx 1.10.1 DONT WORK\r\n**\r\n### Steps to reproduce\r\n\r\n1. Add this ENV\r\n - PAPERLESS_OCR_LANGUAGE=srp-latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn\r\n\r\n2. 
Upload any document\r\n\r\n### Webserver logs\r\n\r\n```bash\r\n[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/paperless_tesseract/parsers.py\", line 292, in parse\r\n\r\n ocrmypdf.ocr(**args)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/api.py\", line 331, in ocr\r\n\r\n check_options(options, plugin_manager)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 246, in check_options\r\n\r\n _check_plugin_options(options, plugin_manager)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 241, in _check_plugin_options\r\n\r\n check_options_languages(options, ocr_engine_languages)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 70, in check_options_languages\r\n\r\n raise MissingDependencyError(msg)\r\n\r\nocrmypdf.exceptions.MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/documents/consumer.py\", line 337, in try_consume_file\r\n\r\n document_parser.parse(self.path, mime_type, self.filename)\r\n\r\n File \"/usr/src/paperless/src/paperless_tesseract/parsers.py\", line 346, in parse\r\n\r\n raise ParseError(f\"{e.__class__.__name__}: {str(e)}\") from e\r\n\r\ndocuments.parsers.ParseError: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n```\r\n\r\n\r\n### Browser logs\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.10.1\r\n\r\n### Host OS\r\n\r\nDocker\r\n\r\n### Installation method\r\n\r\nDocker - official image\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import shutil\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\n\ndef get_tesseract_langs():\n proc = subprocess.run(\n [shutil.which(\"tesseract\"), \"--list-langs\"],\n capture_output=True,\n )\n\n # Decode bytes to string, split on newlines, trim out the header\n proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n\n # Replace _ with - to convert two part languages to the expected code\n return [x.replace(\"_\", \"-\") for x in proc_lines]\n\n\n@register()\ndef check_default_language_available(app_configs, **kwargs):\n installed_langs = get_tesseract_langs()\n\n if not settings.OCR_LANGUAGE:\n return [\n Warning(\n \"No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. \"\n \"This means that tesseract will fallback to english.\",\n ),\n ]\n\n specified_langs = settings.OCR_LANGUAGE.split(\"+\")\n\n for lang in specified_langs:\n if lang not in installed_langs:\n return [\n Error(\n f\"The selected ocr language {lang} is \"\n f\"not installed. 
Paperless cannot OCR your documents \"\n f\"without it. Please fix PAPERLESS_OCR_LANGUAGE.\",\n ),\n ]\n\n return []\n", "path": "src/paperless_tesseract/checks.py"}], "after_files": [{"content": "import shutil\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\n\ndef get_tesseract_langs():\n proc = subprocess.run(\n [shutil.which(\"tesseract\"), \"--list-langs\"],\n capture_output=True,\n )\n\n # Decode bytes to string, split on newlines, trim out the header\n proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n\n return [x.strip() for x in proc_lines]\n\n\n@register()\ndef check_default_language_available(app_configs, **kwargs):\n installed_langs = get_tesseract_langs()\n\n if not settings.OCR_LANGUAGE:\n return [\n Warning(\n \"No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. \"\n \"This means that tesseract will fallback to english.\",\n ),\n ]\n\n specified_langs = settings.OCR_LANGUAGE.split(\"+\")\n\n for lang in specified_langs:\n if lang not in installed_langs:\n return [\n Error(\n f\"The selected ocr language {lang} is \"\n f\"not installed. Paperless cannot OCR your documents \"\n f\"without it. Please fix PAPERLESS_OCR_LANGUAGE.\",\n ),\n ]\n\n return []\n", "path": "src/paperless_tesseract/checks.py"}]} | 1,613 | 151 |
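A minimal, self-contained sketch of the behavior the golden diff above establishes. The sample `--list-langs` output is invented for illustration, since real output depends on which tesseract language packs are installed:

```python
# Hypothetical tesseract --list-langs output; the header line is discarded
# exactly as in the patched get_tesseract_langs().
sample_stdout = b"List of available languages (4):\neng\nsrp\nsrp_latn\nscript/Latin\n"

proc_lines = sample_stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
installed_langs = [x.strip() for x in proc_lines]

# "srp_latn" now survives untouched, so PAPERLESS_OCR_LANGUAGE=srp_latn+srp
# passes the startup check instead of being rewritten to "srp-latn".
assert "srp_latn" in installed_langs
```

The design point is that the check should compare against the codes tesseract itself reports, rather than normalizing underscores to hyphens and then handing ocrmypdf a code the engine does not recognize.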
gh_patches_debug_60522 | rasdani/github-patches | git_diff | streamlit__streamlit-7257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing start_time of st.video() doesn't work for the same video
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Changing start_time of st.video() doesn't work for the same video.
### Reproducible Code Example
```Python
import streamlit as st
timestamp = st.text_input('timestamp', '6')
st.video('local video path', start_time=int(timestamp))
```
### Steps To Reproduce
1. Replace 'local video path' with your own video path in the provided code, and run the code
2. Type different timestamp in the text input box
3. The video timestamp doesn't change
### Expected Behavior
The timestamp should change as start_time changes.
### Current Behavior
The video timestamp doesn't change. It always shows the initial timestamp. However, if you change the video to a different one in the source code and rerun the app, the timestamp will change correctly.
### Is this a regression?
- [ ] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.25.0
- Python version: Python 3.10.11
- Operating System: Windows 11 Home 22H2
- Browser: Microsoft Edge Version 115.0.1901.188 (Official build) (64-bit)
### Additional Information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_video.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import requests
16
17 import streamlit as st
18
19 url = "https://www.w3schools.com/html/mov_bbb.mp4"
20 file = requests.get(url).content
21 st.video(file)
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_video.py b/e2e/scripts/st_video.py
--- a/e2e/scripts/st_video.py
+++ b/e2e/scripts/st_video.py
@@ -19,3 +19,7 @@
url = "https://www.w3schools.com/html/mov_bbb.mp4"
file = requests.get(url).content
st.video(file)
+
+# Test start time with widget
+timestamp = st.number_input("Start Time (in seconds)", min_value=0, value=6)
+st.video(url, start_time=int(timestamp))
| {"golden_diff": "diff --git a/e2e/scripts/st_video.py b/e2e/scripts/st_video.py\n--- a/e2e/scripts/st_video.py\n+++ b/e2e/scripts/st_video.py\n@@ -19,3 +19,7 @@\n url = \"https://www.w3schools.com/html/mov_bbb.mp4\"\n file = requests.get(url).content\n st.video(file)\n+\n+# Test start time with widget\n+timestamp = st.number_input(\"Start Time (in seconds)\", min_value=0, value=6)\n+st.video(url, start_time=int(timestamp))\n", "issue": "Changing start_time of st.video() doesn't work for the same video\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nChanging start_time of st.video() doesn't work for the same video.\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nimport streamlit as st\r\n\r\ntimestamp = st.text_input('timestamp', '6')\r\nst.video('local video path', start_time=int(timestamp))\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\n1. Replace 'local video path' with your own video path in the provided code, and run the code\r\n2. Type different timestamp in the text input box\r\n3. The video timestamp doesn't change\r\n\r\n### Expected Behavior\r\n\r\nThe timestamp should change as start_time changes.\r\n\r\n### Current Behavior\r\n\r\nThe video timestamp doesn't change. It always shows the initial timestamp. However, if you change the video to a different one in the source code and rerun the app, the timestamp will change correctly.\r\n\r\n### Is this a regression?\r\n\r\n- [ ] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.25.0\r\n- Python version: Python 3.10.11\r\n- Operating System: Windows 11 Home 22H2\r\n- Browser: Microsoft Edge Version 115.0.1901.188 (Official build) (64-bit)\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\nimport streamlit as st\n\nurl = \"https://www.w3schools.com/html/mov_bbb.mp4\"\nfile = requests.get(url).content\nst.video(file)\n", "path": "e2e/scripts/st_video.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\nimport streamlit as st\n\nurl = \"https://www.w3schools.com/html/mov_bbb.mp4\"\nfile = requests.get(url).content\nst.video(file)\n\n# Test start time with widget\ntimestamp = st.number_input(\"Start Time (in seconds)\", min_value=0, value=6)\nst.video(url, start_time=int(timestamp))\n", "path": "e2e/scripts/st_video.py"}]} | 833 | 122 |
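The lines the golden diff adds double as a manual repro: re-rendering the same video source with a widget-driven `start_time` should seek the player. A standalone sketch of that repro (the sample MP4 URL is the one the e2e test itself uses; any reachable video URL would do):

```python
import streamlit as st

url = "https://www.w3schools.com/html/mov_bbb.mp4"

# Re-rendering st.video with a changed start_time for the *same* source is
# exactly the case the issue reports as broken.
timestamp = st.number_input("Start Time (in seconds)", min_value=0, value=6)
st.video(url, start_time=int(timestamp))
```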
gh_patches_debug_9299 | rasdani/github-patches | git_diff | certbot__certbot-4857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flesh out oldest tests
We should find the oldest versions of all our Python dependencies used in OS packages and add them to the [oldest tests](https://github.com/certbot/certbot/blob/master/tox.ini#L36) in Travis. This will prevent bugs like #3098 and #4040 from slipping into a release.
The two distros I'd check here are CentOS 7 and Debian 8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `acme/setup.py`
Content:
```
1 import sys
2
3 from setuptools import setup
4 from setuptools import find_packages
5
6
7 version = '0.16.0.dev0'
8
9 # Please update tox.ini when modifying dependency version requirements
10 install_requires = [
11 # load_pem_private/public_key (>=0.6)
12 # rsa_recover_prime_factors (>=0.8)
13 'cryptography>=0.8',
14 # Connection.set_tlsext_host_name (>=0.13)
15 'mock',
16 'PyOpenSSL>=0.13',
17 'pyrfc3339',
18 'pytz',
19 # requests>=2.10 is required to fix
20 # https://github.com/shazow/urllib3/issues/556. This requirement can be
21 # relaxed to 'requests[security]>=2.4.1', however, less useful errors
22 # will be raised for some network/SSL errors.
23 'requests[security]>=2.10',
24 # For pkg_resources. >=1.0 so pip resolves it to a version cryptography
25 # will tolerate; see #2599:
26 'setuptools>=1.0',
27 'six',
28 ]
29
30 # env markers cause problems with older pip and setuptools
31 if sys.version_info < (2, 7):
32 install_requires.extend([
33 'argparse',
34 'ordereddict',
35 ])
36
37 dev_extras = [
38 'nose',
39 'tox',
40 ]
41
42 docs_extras = [
43 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
44 'sphinx_rtd_theme',
45 ]
46
47
48 setup(
49 name='acme',
50 version=version,
51 description='ACME protocol implementation in Python',
52 url='https://github.com/letsencrypt/letsencrypt',
53 author="Certbot Project",
54 author_email='[email protected]',
55 license='Apache License 2.0',
56 classifiers=[
57 'Development Status :: 3 - Alpha',
58 'Intended Audience :: Developers',
59 'License :: OSI Approved :: Apache Software License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 2.6',
63 'Programming Language :: Python :: 2.7',
64 'Programming Language :: Python :: 3',
65 'Programming Language :: Python :: 3.3',
66 'Programming Language :: Python :: 3.4',
67 'Programming Language :: Python :: 3.5',
68 'Programming Language :: Python :: 3.6',
69 'Topic :: Internet :: WWW/HTTP',
70 'Topic :: Security',
71 ],
72
73 packages=find_packages(),
74 include_package_data=True,
75 install_requires=install_requires,
76 extras_require={
77 'dev': dev_extras,
78 'docs': docs_extras,
79 },
80 entry_points={
81 'console_scripts': [
82 'jws = acme.jose.jws:CLI.run',
83 ],
84 },
85 test_suite='acme',
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/acme/setup.py b/acme/setup.py
--- a/acme/setup.py
+++ b/acme/setup.py
@@ -16,11 +16,7 @@
'PyOpenSSL>=0.13',
'pyrfc3339',
'pytz',
- # requests>=2.10 is required to fix
- # https://github.com/shazow/urllib3/issues/556. This requirement can be
- # relaxed to 'requests[security]>=2.4.1', however, less useful errors
- # will be raised for some network/SSL errors.
- 'requests[security]>=2.10',
+ 'requests[security]>=2.4.1', # security extras added in 2.4.1
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
| {"golden_diff": "diff --git a/acme/setup.py b/acme/setup.py\n--- a/acme/setup.py\n+++ b/acme/setup.py\n@@ -16,11 +16,7 @@\n 'PyOpenSSL>=0.13',\n 'pyrfc3339',\n 'pytz',\n- # requests>=2.10 is required to fix\n- # https://github.com/shazow/urllib3/issues/556. This requirement can be\n- # relaxed to 'requests[security]>=2.4.1', however, less useful errors\n- # will be raised for some network/SSL errors.\n- 'requests[security]>=2.10',\n+ 'requests[security]>=2.4.1', # security extras added in 2.4.1\n # For pkg_resources. >=1.0 so pip resolves it to a version cryptography\n # will tolerate; see #2599:\n 'setuptools>=1.0',\n", "issue": "Flesh out oldest tests\nWe should find the oldest versions of all our Python dependencies used in OS packages and add them to the [oldest tests](https://github.com/certbot/certbot/blob/master/tox.ini#L36) in Travis. This will prevent bugs like #3098 and #4040 from slipping into a release.\r\n\r\nThe two distros I'd check here are CentOS 7 and Debian 8.\n", "before_files": [{"content": "import sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.16.0.dev0'\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n # load_pem_private/public_key (>=0.6)\n # rsa_recover_prime_factors (>=0.8)\n 'cryptography>=0.8',\n # Connection.set_tlsext_host_name (>=0.13)\n 'mock',\n 'PyOpenSSL>=0.13',\n 'pyrfc3339',\n 'pytz',\n # requests>=2.10 is required to fix\n # https://github.com/shazow/urllib3/issues/556. This requirement can be\n # relaxed to 'requests[security]>=2.4.1', however, less useful errors\n # will be raised for some network/SSL errors.\n 'requests[security]>=2.10',\n # For pkg_resources. >=1.0 so pip resolves it to a version cryptography\n # will tolerate; see #2599:\n 'setuptools>=1.0',\n 'six',\n]\n\n# env markers cause problems with older pip and setuptools\nif sys.version_info < (2, 7):\n install_requires.extend([\n 'argparse',\n 'ordereddict',\n ])\n\ndev_extras = [\n 'nose',\n 'tox',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\n\nsetup(\n name='acme',\n version=version,\n description='ACME protocol implementation in Python',\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n entry_points={\n 'console_scripts': [\n 'jws = acme.jose.jws:CLI.run',\n ],\n },\n test_suite='acme',\n)\n", "path": "acme/setup.py"}], "after_files": [{"content": "import sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.16.0.dev0'\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n # 
load_pem_private/public_key (>=0.6)\n # rsa_recover_prime_factors (>=0.8)\n 'cryptography>=0.8',\n # Connection.set_tlsext_host_name (>=0.13)\n 'mock',\n 'PyOpenSSL>=0.13',\n 'pyrfc3339',\n 'pytz',\n 'requests[security]>=2.4.1', # security extras added in 2.4.1\n # For pkg_resources. >=1.0 so pip resolves it to a version cryptography\n # will tolerate; see #2599:\n 'setuptools>=1.0',\n 'six',\n]\n\n# env markers cause problems with older pip and setuptools\nif sys.version_info < (2, 7):\n install_requires.extend([\n 'argparse',\n 'ordereddict',\n ])\n\ndev_extras = [\n 'nose',\n 'tox',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\n\nsetup(\n name='acme',\n version=version,\n description='ACME protocol implementation in Python',\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n entry_points={\n 'console_scripts': [\n 'jws = acme.jose.jws:CLI.run',\n ],\n },\n test_suite='acme',\n)\n", "path": "acme/setup.py"}]} | 1,163 | 219 |
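For context on the relaxed pin, here is a small sketch of how a version floor like this is evaluated. It uses the third-party `packaging` library, which is an assumption of this illustration rather than something the record itself depends on:

```python
from packaging.specifiers import SpecifierSet

# The new floor from the diff above; the security extras appeared in requests 2.4.1.
spec = SpecifierSet(">=2.4.1")
for candidate in ("2.4.0", "2.4.1", "2.10.0"):
    print(candidate, candidate in spec)  # False, True, True
```

Relaxing the floor lets the "oldest" tox environment install the requests version actually shipped by CentOS 7 and Debian 8, at the cost of less helpful error messages for some network/SSL failures.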
gh_patches_debug_27784 | rasdani/github-patches | git_diff | searx__searx-1186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bing Video search engine doesn't work
Hallo,
yesterday I've set up my own instance of searx. Thereby I discovered A problem with the Bing Video search engine. This is the shown error message:
```
Die folgenden Suchmaschinen können die Ergebnisse nicht empfangen:
bing videos (unexpected crash: list index out of range)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/bing_videos.py`
Content:
```
1 """
2 Bing (Videos)
3
4 @website https://www.bing.com/videos
5 @provide-api yes (http://datamarket.azure.com/dataset/bing/search)
6
7 @using-api no
8 @results HTML
9 @stable no
10 @parse url, title, content, thumbnail
11 """
12
13 from json import loads
14 from lxml import html
15 from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code
16 from searx.engines.xpath import extract_text
17 from searx.url_utils import urlencode
18
19
20 categories = ['videos']
21 paging = True
22 safesearch = True
23 time_range_support = True
24 number_of_results = 10
25 language_support = True
26
27 search_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\
28 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'
29 time_range_string = '&qft=+filterui:videoage-lt{interval}'
30 time_range_dict = {'day': '1440',
31 'week': '10080',
32 'month': '43200',
33 'year': '525600'}
34
35 # safesearch definitions
36 safesearch_types = {2: 'STRICT',
37 1: 'DEMOTE',
38 0: 'OFF'}
39
40
41 # do search-request
42 def request(query, params):
43 offset = (params['pageno'] - 1) * 10 + 1
44
45 # safesearch cookie
46 params['cookies']['SRCHHPGUSR'] = \
47 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
48
49 # language cookie
50 region = get_region_code(params['language'], lang_list=supported_languages)
51 params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'
52
53 # query and paging
54 params['url'] = search_url.format(query=urlencode({'q': query}),
55 offset=offset,
56 number_of_results=number_of_results)
57
58 # time range
59 if params['time_range'] in time_range_dict:
60 params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
61
62 return params
63
64
65 # get response from search-request
66 def response(resp):
67 results = []
68
69 dom = html.fromstring(resp.text)
70
71 for result in dom.xpath('//div[@class="dg_u"]'):
72
73 # try to extract the url
74 url_container = result.xpath('.//div[@class="sa_wrapper"]/@data-eventpayload')
75 if len(url_container) > 0:
76 url = loads(url_container[0])['purl']
77 else:
78 url = result.xpath('./a/@href')[0]
79
80 # discard results that do not return an external url
81 # very recent results sometimes don't return the video's url
82 if url.startswith('/videos/search?'):
83 continue
84
85 title = extract_text(result.xpath('./a//div[@class="tl"]'))
86 content = extract_text(result.xpath('.//div[@class="pubInfo"]'))
87 thumbnail = result.xpath('.//div[@class="vthumb"]/img/@src')[0]
88
89 results.append({'url': url,
90 'title': title,
91 'content': content,
92 'thumbnail': thumbnail,
93 'template': 'videos.html'})
94
95 # first page ignores requested number of results
96 if len(results) >= number_of_results:
97 break
98
99 return results
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -69,22 +69,11 @@
dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="dg_u"]'):
-
- # try to extract the url
- url_container = result.xpath('.//div[@class="sa_wrapper"]/@data-eventpayload')
- if len(url_container) > 0:
- url = loads(url_container[0])['purl']
- else:
- url = result.xpath('./a/@href')[0]
-
- # discard results that do not return an external url
- # very recent results sometimes don't return the video's url
- if url.startswith('/videos/search?'):
- continue
-
- title = extract_text(result.xpath('./a//div[@class="tl"]'))
- content = extract_text(result.xpath('.//div[@class="pubInfo"]'))
- thumbnail = result.xpath('.//div[@class="vthumb"]/img/@src')[0]
+ url = result.xpath('./div[@class="mc_vtvc"]/a/@href')[0]
+ url = 'https://bing.com' + url
+ title = extract_text(result.xpath('./div/a/div/div[@class="mc_vtvc_title"]/@title'))
+ content = extract_text(result.xpath('./div/a/div/div/div/div/text()'))
+ thumbnail = result.xpath('./div/a/div/div/img/@src')[0]
results.append({'url': url,
'title': title,
@@ -92,7 +81,6 @@
'thumbnail': thumbnail,
'template': 'videos.html'})
- # first page ignores requested number of results
if len(results) >= number_of_results:
break
| {"golden_diff": "diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py\n--- a/searx/engines/bing_videos.py\n+++ b/searx/engines/bing_videos.py\n@@ -69,22 +69,11 @@\n dom = html.fromstring(resp.text)\n \n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n-\n- # try to extract the url\n- url_container = result.xpath('.//div[@class=\"sa_wrapper\"]/@data-eventpayload')\n- if len(url_container) > 0:\n- url = loads(url_container[0])['purl']\n- else:\n- url = result.xpath('./a/@href')[0]\n-\n- # discard results that do not return an external url\n- # very recent results sometimes don't return the video's url\n- if url.startswith('/videos/search?'):\n- continue\n-\n- title = extract_text(result.xpath('./a//div[@class=\"tl\"]'))\n- content = extract_text(result.xpath('.//div[@class=\"pubInfo\"]'))\n- thumbnail = result.xpath('.//div[@class=\"vthumb\"]/img/@src')[0]\n+ url = result.xpath('./div[@class=\"mc_vtvc\"]/a/@href')[0]\n+ url = 'https://bing.com' + url\n+ title = extract_text(result.xpath('./div/a/div/div[@class=\"mc_vtvc_title\"]/@title'))\n+ content = extract_text(result.xpath('./div/a/div/div/div/div/text()'))\n+ thumbnail = result.xpath('./div/a/div/div/img/@src')[0]\n \n results.append({'url': url,\n 'title': title,\n@@ -92,7 +81,6 @@\n 'thumbnail': thumbnail,\n 'template': 'videos.html'})\n \n- # first page ignores requested number of results\n if len(results) >= number_of_results:\n break\n", "issue": "Bing Video search engine doesn't work\nHallo,\r\n\r\nyesterday I've set up my own instance of searx. Thereby I discovered A problem with the Bing Video search engine. This is the shown error message:\r\n\r\n```\r\nDie folgenden Suchmaschinen k\u00f6nnen die Ergebnisse nicht empfangen:\r\nbing videos (unexpected crash: list index out of range)\r\n```\n", "before_files": [{"content": "\"\"\"\n Bing (Videos)\n\n @website https://www.bing.com/videos\n @provide-api yes (http://datamarket.azure.com/dataset/bing/search)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, content, thumbnail\n\"\"\"\n\nfrom json import loads\nfrom lxml import html\nfrom searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import urlencode\n\n\ncategories = ['videos']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 10\nlanguage_support = True\n\nsearch_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\\\n 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'\ntime_range_string = '&qft=+filterui:videoage-lt{interval}'\ntime_range_dict = {'day': '1440',\n 'week': '10080',\n 'month': '43200',\n 'year': '525600'}\n\n# safesearch definitions\nsafesearch_types = {2: 'STRICT',\n 1: 'DEMOTE',\n 0: 'OFF'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n # safesearch cookie\n params['cookies']['SRCHHPGUSR'] = \\\n 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')\n\n # language cookie\n region = get_region_code(params['language'], lang_list=supported_languages)\n params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'\n\n # query and paging\n params['url'] = search_url.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results)\n\n # time range\n if params['time_range'] in time_range_dict:\n params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])\n\n return params\n\n\n# get 
response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n\n # try to extract the url\n url_container = result.xpath('.//div[@class=\"sa_wrapper\"]/@data-eventpayload')\n if len(url_container) > 0:\n url = loads(url_container[0])['purl']\n else:\n url = result.xpath('./a/@href')[0]\n\n # discard results that do not return an external url\n # very recent results sometimes don't return the video's url\n if url.startswith('/videos/search?'):\n continue\n\n title = extract_text(result.xpath('./a//div[@class=\"tl\"]'))\n content = extract_text(result.xpath('.//div[@class=\"pubInfo\"]'))\n thumbnail = result.xpath('.//div[@class=\"vthumb\"]/img/@src')[0]\n\n results.append({'url': url,\n 'title': title,\n 'content': content,\n 'thumbnail': thumbnail,\n 'template': 'videos.html'})\n\n # first page ignores requested number of results\n if len(results) >= number_of_results:\n break\n\n return results\n", "path": "searx/engines/bing_videos.py"}], "after_files": [{"content": "\"\"\"\n Bing (Videos)\n\n @website https://www.bing.com/videos\n @provide-api yes (http://datamarket.azure.com/dataset/bing/search)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, content, thumbnail\n\"\"\"\n\nfrom json import loads\nfrom lxml import html\nfrom searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import urlencode\n\n\ncategories = ['videos']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 10\nlanguage_support = True\n\nsearch_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\\\n 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'\ntime_range_string = '&qft=+filterui:videoage-lt{interval}'\ntime_range_dict = {'day': '1440',\n 'week': '10080',\n 'month': '43200',\n 'year': '525600'}\n\n# safesearch definitions\nsafesearch_types = {2: 'STRICT',\n 1: 'DEMOTE',\n 0: 'OFF'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n # safesearch cookie\n params['cookies']['SRCHHPGUSR'] = \\\n 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')\n\n # language cookie\n region = get_region_code(params['language'], lang_list=supported_languages)\n params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'\n\n # query and paging\n params['url'] = search_url.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results)\n\n # time range\n if params['time_range'] in time_range_dict:\n params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n url = result.xpath('./div[@class=\"mc_vtvc\"]/a/@href')[0]\n url = 'https://bing.com' + url\n title = extract_text(result.xpath('./div/a/div/div[@class=\"mc_vtvc_title\"]/@title'))\n content = extract_text(result.xpath('./div/a/div/div/div/div/text()'))\n thumbnail = result.xpath('./div/a/div/div/img/@src')[0]\n\n results.append({'url': url,\n 'title': title,\n 'content': content,\n 'thumbnail': thumbnail,\n 'template': 'videos.html'})\n\n if len(results) >= number_of_results:\n break\n\n return results\n", "path": "searx/engines/bing_videos.py"}]} | 1,320 | 426 |
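To see why the old selectors produced the `list index out of range` crash (empty node lists indexed with `[0]`), here is the new extraction logic run against a canned HTML fragment. The markup below is a simplified stand-in for Bing's real result structure, and plain indexing replaces searx's `extract_text` helper:

```python
from lxml import html

# Invented fragment matching the element paths the fixed response() expects.
fragment = """
<div class="dg_u"><div class="mc_vtvc"><a href="/videos/search?view=detail">
  <div>
    <div class="mc_vtvc_title" title="Big Buck Bunny"></div>
    <div>
      <div><div>3:10 - example.org</div></div>
      <img src="https://example.invalid/thumb.jpg">
    </div>
  </div>
</a></div></div>
"""

dom = html.fromstring(fragment)
for result in dom.xpath('//div[@class="dg_u"]'):
    url = 'https://bing.com' + result.xpath('./div[@class="mc_vtvc"]/a/@href')[0]
    title = result.xpath('./div/a/div/div[@class="mc_vtvc_title"]/@title')[0]
    content = result.xpath('./div/a/div/div/div/div/text()')[0]
    thumbnail = result.xpath('./div/a/div/div/img/@src')[0]
    print(url, title, content, thumbnail)
```

Against the markup Bing actually served at the time of the issue, the old `sa_wrapper`/`tl`/`vthumb` classes no longer existed, so each xpath returned an empty list and the first `[0]` raised.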
gh_patches_debug_35308 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AZ Legislator with the following id has an invalid phone number AZL000372
State: AZ (be sure to include in ticket title)
This repository is for issues with state data, for feature requests, etc.
please visit the contributor guide (see above message) to file the issue in the correct place.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/az/legislators.py`
Content:
```
1 from billy.scrape import NoDataForPeriod
2 from billy.scrape.legislators import LegislatorScraper, Legislator
3 from lxml import html
4
5 import re, datetime
6
7 class AZLegislatorScraper(LegislatorScraper):
8 jurisdiction = 'az'
9 parties = {
10 'R': 'Republican',
11 'D': 'Democratic',
12 'L': 'Libertarian',
13 'I': 'Independent',
14 'G': 'Green'
15 }
16
17 def get_party(self, abbr):
18 return self.parties[abbr]
19
20 def scrape(self, chamber, term):
21 # TODO: old AZ scraper allowed old sessions, they seem to be gone?
22 self.validate_term(term, latest_only=True)
23
24 body = {'lower': 'H', 'upper': 'S'}[chamber]
25 url = 'http://www.azleg.gov/MemberRoster/?body=' + body
26 page = self.get(url).text
27
28 # there is a bad comment closing tag on this page
29 page = page.replace('--!>', '-->')
30
31 root = html.fromstring(page)
32
33 path = '//table//tr'
34 roster = root.xpath(path)[1:]
35 for row in roster:
36 position = ''
37 name, district, party, email, room, phone, = row.xpath('td')
38
39 if email.attrib.get('class') == 'vacantmember':
40 continue # Skip any vacant members.
41
42 link = name.xpath('string(a/@href)')
43 if len(name) == 1:
44 name = name.text_content().strip()
45 else:
46 position = name.tail.strip()
47 name = name[0].text_content().strip()
48 if '--' in name:
49 name = name.split('--')[0].strip()
50
51 linkpage = self.get(link).text
52 linkpage = linkpage.replace('--!>', '-->')
53 linkroot = html.fromstring(linkpage)
54 linkroot.make_links_absolute(link)
55
56 photos = linkroot.xpath("//img[contains(@src, 'MemberPhoto')]")
57
58 if len(photos) != 1:
59 self.warning('no photo on ' + link)
60 photo_url = ''
61 else:
62 photo_url = photos[0].attrib['src']
63
64 district = district.text_content()
65 party = party.text_content().strip()
66 email = email.text_content().strip()
67
68 if email.startswith('Email: '):
69 email = email.replace('Email: ', '').lower() + '@azleg.gov'
70 else:
71 email = ''
72
73 party = self.get_party(party)
74 room = room.text_content().strip()
75 if chamber == 'lower':
76 address = "House of Representatives\n"
77 else:
78 address = "Senate\n"
79 address = address + "1700 West Washington\n Room " + room \
80 + "\nPhoenix, AZ 85007"
81
82 phone = phone.text_content().strip()
83 if not phone.startswith('602'):
84 phone = "602-" + phone
85
86 leg = Legislator(term, chamber, district, full_name=name,
87 party=party, url=link,
88 photo_url=photo_url)
89
90 leg.add_office('capitol', 'Capitol Office', address=address,
91 phone=phone, email=email)
92
93 if position:
94 leg.add_role( position, term, chamber=chamber,
95 district=district, party=party)
96
97 leg.add_source(url)
98
99 #Probably just get this from the committee scraper
100 #self.scrape_member_page(link, session, chamber, leg)
101 self.save_legislator(leg)
102
103 def scrape_member_page(self, url, session, chamber, leg):
104 html = self.get(url).text
105 root = html.fromstring(html)
106 #get the committee membership
107 c = root.xpath('//td/div/strong[contains(text(), "Committee")]')
108 for row in c.xpath('ancestor::table[1]')[1:]:
109 name = row[0].text_content().strip()
110 role = row[1].text_content().strip()
111 leg.add_role(role, session, chamber=chamber, committee=name)
112
113 leg.add_source(url)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/az/legislators.py b/openstates/az/legislators.py
--- a/openstates/az/legislators.py
+++ b/openstates/az/legislators.py
@@ -1,8 +1,7 @@
-from billy.scrape import NoDataForPeriod
from billy.scrape.legislators import LegislatorScraper, Legislator
from lxml import html
+import re
-import re, datetime
class AZLegislatorScraper(LegislatorScraper):
jurisdiction = 'az'
@@ -80,30 +79,30 @@
+ "\nPhoenix, AZ 85007"
phone = phone.text_content().strip()
- if not phone.startswith('602'):
+ if '602' not in re.findall(r'(\d+)', phone):
phone = "602-" + phone
leg = Legislator(term, chamber, district, full_name=name,
- party=party, url=link,
- photo_url=photo_url)
+ party=party, url=link,
+ photo_url=photo_url)
leg.add_office('capitol', 'Capitol Office', address=address,
phone=phone, email=email)
if position:
- leg.add_role( position, term, chamber=chamber,
+ leg.add_role(position, term, chamber=chamber,
district=district, party=party)
leg.add_source(url)
- #Probably just get this from the committee scraper
- #self.scrape_member_page(link, session, chamber, leg)
+ # Probably just get this from the committee scraper
+ # self.scrape_member_page(link, session, chamber, leg)
self.save_legislator(leg)
def scrape_member_page(self, url, session, chamber, leg):
html = self.get(url).text
root = html.fromstring(html)
- #get the committee membership
+ # get the committee membership
c = root.xpath('//td/div/strong[contains(text(), "Committee")]')
for row in c.xpath('ancestor::table[1]')[1:]:
name = row[0].text_content().strip()
| {"golden_diff": "diff --git a/openstates/az/legislators.py b/openstates/az/legislators.py\n--- a/openstates/az/legislators.py\n+++ b/openstates/az/legislators.py\n@@ -1,8 +1,7 @@\n-from billy.scrape import NoDataForPeriod\n from billy.scrape.legislators import LegislatorScraper, Legislator\n from lxml import html\n+import re\n \n-import re, datetime\n \n class AZLegislatorScraper(LegislatorScraper):\n jurisdiction = 'az'\n@@ -80,30 +79,30 @@\n + \"\\nPhoenix, AZ 85007\"\n \n phone = phone.text_content().strip()\n- if not phone.startswith('602'):\n+ if '602' not in re.findall(r'(\\d+)', phone):\n phone = \"602-\" + phone\n \n leg = Legislator(term, chamber, district, full_name=name,\n- party=party, url=link,\n- photo_url=photo_url)\n+ party=party, url=link,\n+ photo_url=photo_url)\n \n leg.add_office('capitol', 'Capitol Office', address=address,\n phone=phone, email=email)\n \n if position:\n- leg.add_role( position, term, chamber=chamber,\n+ leg.add_role(position, term, chamber=chamber,\n district=district, party=party)\n \n leg.add_source(url)\n \n- #Probably just get this from the committee scraper\n- #self.scrape_member_page(link, session, chamber, leg)\n+ # Probably just get this from the committee scraper\n+ # self.scrape_member_page(link, session, chamber, leg)\n self.save_legislator(leg)\n \n def scrape_member_page(self, url, session, chamber, leg):\n html = self.get(url).text\n root = html.fromstring(html)\n- #get the committee membership\n+ # get the committee membership\n c = root.xpath('//td/div/strong[contains(text(), \"Committee\")]')\n for row in c.xpath('ancestor::table[1]')[1:]:\n name = row[0].text_content().strip()\n", "issue": "AZ Legislator with the following id has an invalid phone number AZL000372\nState: AZ (be sure to include in ticket title)\r\n\r\nThis repository is for issues with state data, for feature requests, etc.\r\nplease visit the contributor guide (see above message) to file the issue in the correct place.\r\n\n", "before_files": [{"content": "from billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom lxml import html\n\nimport re, datetime\n\nclass AZLegislatorScraper(LegislatorScraper):\n jurisdiction = 'az'\n parties = {\n 'R': 'Republican',\n 'D': 'Democratic',\n 'L': 'Libertarian',\n 'I': 'Independent',\n 'G': 'Green'\n }\n\n def get_party(self, abbr):\n return self.parties[abbr]\n\n def scrape(self, chamber, term):\n # TODO: old AZ scraper allowed old sessions, they seem to be gone?\n self.validate_term(term, latest_only=True)\n\n body = {'lower': 'H', 'upper': 'S'}[chamber]\n url = 'http://www.azleg.gov/MemberRoster/?body=' + body\n page = self.get(url).text\n\n # there is a bad comment closing tag on this page\n page = page.replace('--!>', '-->')\n\n root = html.fromstring(page)\n\n path = '//table//tr'\n roster = root.xpath(path)[1:]\n for row in roster:\n position = ''\n name, district, party, email, room, phone, = row.xpath('td')\n\n if email.attrib.get('class') == 'vacantmember':\n continue # Skip any vacant members.\n\n link = name.xpath('string(a/@href)')\n if len(name) == 1:\n name = name.text_content().strip()\n else:\n position = name.tail.strip()\n name = name[0].text_content().strip()\n if '--' in name:\n name = name.split('--')[0].strip()\n\n linkpage = self.get(link).text\n linkpage = linkpage.replace('--!>', '-->')\n linkroot = html.fromstring(linkpage)\n linkroot.make_links_absolute(link)\n\n photos = linkroot.xpath(\"//img[contains(@src, 'MemberPhoto')]\")\n\n if 
len(photos) != 1:\n self.warning('no photo on ' + link)\n photo_url = ''\n else:\n photo_url = photos[0].attrib['src']\n\n district = district.text_content()\n party = party.text_content().strip()\n email = email.text_content().strip()\n\n if email.startswith('Email: '):\n email = email.replace('Email: ', '').lower() + '@azleg.gov'\n else:\n email = ''\n\n party = self.get_party(party)\n room = room.text_content().strip()\n if chamber == 'lower':\n address = \"House of Representatives\\n\"\n else:\n address = \"Senate\\n\"\n address = address + \"1700 West Washington\\n Room \" + room \\\n + \"\\nPhoenix, AZ 85007\"\n\n phone = phone.text_content().strip()\n if not phone.startswith('602'):\n phone = \"602-\" + phone\n\n leg = Legislator(term, chamber, district, full_name=name,\n party=party, url=link,\n photo_url=photo_url)\n\n leg.add_office('capitol', 'Capitol Office', address=address,\n phone=phone, email=email)\n\n if position:\n leg.add_role( position, term, chamber=chamber,\n district=district, party=party)\n\n leg.add_source(url)\n\n #Probably just get this from the committee scraper\n #self.scrape_member_page(link, session, chamber, leg)\n self.save_legislator(leg)\n\n def scrape_member_page(self, url, session, chamber, leg):\n html = self.get(url).text\n root = html.fromstring(html)\n #get the committee membership\n c = root.xpath('//td/div/strong[contains(text(), \"Committee\")]')\n for row in c.xpath('ancestor::table[1]')[1:]:\n name = row[0].text_content().strip()\n role = row[1].text_content().strip()\n leg.add_role(role, session, chamber=chamber, committee=name)\n\n leg.add_source(url)\n", "path": "openstates/az/legislators.py"}], "after_files": [{"content": "from billy.scrape.legislators import LegislatorScraper, Legislator\nfrom lxml import html\nimport re\n\n\nclass AZLegislatorScraper(LegislatorScraper):\n jurisdiction = 'az'\n parties = {\n 'R': 'Republican',\n 'D': 'Democratic',\n 'L': 'Libertarian',\n 'I': 'Independent',\n 'G': 'Green'\n }\n\n def get_party(self, abbr):\n return self.parties[abbr]\n\n def scrape(self, chamber, term):\n # TODO: old AZ scraper allowed old sessions, they seem to be gone?\n self.validate_term(term, latest_only=True)\n\n body = {'lower': 'H', 'upper': 'S'}[chamber]\n url = 'http://www.azleg.gov/MemberRoster/?body=' + body\n page = self.get(url).text\n\n # there is a bad comment closing tag on this page\n page = page.replace('--!>', '-->')\n\n root = html.fromstring(page)\n\n path = '//table//tr'\n roster = root.xpath(path)[1:]\n for row in roster:\n position = ''\n name, district, party, email, room, phone, = row.xpath('td')\n\n if email.attrib.get('class') == 'vacantmember':\n continue # Skip any vacant members.\n\n link = name.xpath('string(a/@href)')\n if len(name) == 1:\n name = name.text_content().strip()\n else:\n position = name.tail.strip()\n name = name[0].text_content().strip()\n if '--' in name:\n name = name.split('--')[0].strip()\n\n linkpage = self.get(link).text\n linkpage = linkpage.replace('--!>', '-->')\n linkroot = html.fromstring(linkpage)\n linkroot.make_links_absolute(link)\n\n photos = linkroot.xpath(\"//img[contains(@src, 'MemberPhoto')]\")\n\n if len(photos) != 1:\n self.warning('no photo on ' + link)\n photo_url = ''\n else:\n photo_url = photos[0].attrib['src']\n\n district = district.text_content()\n party = party.text_content().strip()\n email = email.text_content().strip()\n\n if email.startswith('Email: '):\n email = email.replace('Email: ', '').lower() + '@azleg.gov'\n else:\n email = ''\n\n party = 
self.get_party(party)\n room = room.text_content().strip()\n if chamber == 'lower':\n address = \"House of Representatives\\n\"\n else:\n address = \"Senate\\n\"\n address = address + \"1700 West Washington\\n Room \" + room \\\n + \"\\nPhoenix, AZ 85007\"\n\n phone = phone.text_content().strip()\n if '602' not in re.findall(r'(\\d+)', phone):\n phone = \"602-\" + phone\n\n leg = Legislator(term, chamber, district, full_name=name,\n party=party, url=link,\n photo_url=photo_url)\n\n leg.add_office('capitol', 'Capitol Office', address=address,\n phone=phone, email=email)\n\n if position:\n leg.add_role(position, term, chamber=chamber,\n district=district, party=party)\n\n leg.add_source(url)\n\n # Probably just get this from the committee scraper\n # self.scrape_member_page(link, session, chamber, leg)\n self.save_legislator(leg)\n\n def scrape_member_page(self, url, session, chamber, leg):\n html = self.get(url).text\n root = html.fromstring(html)\n # get the committee membership\n c = root.xpath('//td/div/strong[contains(text(), \"Committee\")]')\n for row in c.xpath('ancestor::table[1]')[1:]:\n name = row[0].text_content().strip()\n role = row[1].text_content().strip()\n leg.add_role(role, session, chamber=chamber, committee=name)\n\n leg.add_source(url)\n", "path": "openstates/az/legislators.py"}]} | 1,474 | 491 |
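A quick illustration of the corrected area-code guard from the diff above; the phone strings here are invented samples, not real legislator data:

```python
import re

def normalize_phone(phone):
    # Prefix the Phoenix area code only when "602" is not already one of the
    # digit groups, so formats like "(602) 926-3300" are no longer
    # double-prefixed into invalid numbers.
    if '602' not in re.findall(r'(\d+)', phone):
        phone = "602-" + phone
    return phone

for sample in ("926-3300", "602-926-3300", "(602) 926-3300"):
    print(sample, "->", normalize_phone(sample))
```

The old `phone.startswith('602')` test failed on any number whose string began with punctuation, which is how the invalid phone number on legislator AZL000372 was produced.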
gh_patches_debug_3289 | rasdani/github-patches | git_diff | mne-tools__mne-bids-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JOSS publication
At the MNE-Sprint in Paris, @teonbrooks @jasmainak and I discussed about writing a short report on MNE-BIDS and publishing it in [JOSS](https://joss.theoj.org/about).
JOSS articles generally provide a very high level description of the software and its relevance:
> Your submission should probably be somewhere between 250-1000 words.
It would allow us to properly point to MNE-BIDS in citations and get some scholarly recognition for our work.
I suggest that we take `pybids` as an example and create a [`/paper`](https://github.com/bids-standard/pybids/tree/master/paper) directory in our repository where we prepare the submission.
Publishing at JOSS would mean that mne-bids stays separate from mne-python instead of being integrated eventually. In a short discussion with @agramfort, we all approved of this idea, because it will allow us to stay with our lightweight and "independent" repository, while users can still benefit from mne-bids by using it as a simple "module" to MNE-Python.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 """Setup MNE-BIDS."""
3 import os
4 from setuptools import setup, find_packages
5
6 # get the version
7 version = None
8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
9 for line in (line.strip() for line in fid):
10 if line.startswith('__version__'):
11 version = line.split('=')[1].strip().strip('\'')
12 break
13 if version is None:
14 raise RuntimeError('Could not determine version')
15
16
17 descr = ('An MNE project for organizing and formatting MEG and EEG data '
18 'according to the BIDS specification.')
19
20 DISTNAME = 'mne-bids'
21 DESCRIPTION = descr
22 MAINTAINER = 'Mainak Jas'
23 MAINTAINER_EMAIL = '[email protected]'
24 URL = 'https://mne.tools/mne-bids/'
25 LICENSE = 'BSD (3-clause)'
26 DOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'
27 VERSION = version
28
29 if __name__ == "__main__":
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 maintainer_email=MAINTAINER_EMAIL,
33 description=DESCRIPTION,
34 license=LICENSE,
35 url=URL,
36 version=VERSION,
37 download_url=DOWNLOAD_URL,
38 long_description=open('README.rst').read(),
39 long_description_content_type='text/x-rst',
40 classifiers=[
41 'Intended Audience :: Science/Research',
42 'Intended Audience :: Developers',
43 'License :: OSI Approved',
44 'Programming Language :: Python',
45 'Topic :: Software Development',
46 'Topic :: Scientific/Engineering',
47 'Operating System :: Microsoft :: Windows',
48 'Operating System :: POSIX',
49 'Operating System :: Unix',
50 'Operating System :: MacOS',
51 ],
52 platforms='any',
53 packages=find_packages(),
54 scripts=['bin/mne_bids'],
55 project_urls={
56 'Documentation': 'https://mne.tools/mne-bids',
57 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',
58 'Source': 'https://github.com/mne-tools/mne-bids',
59 },
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,8 +14,8 @@
raise RuntimeError('Could not determine version')
-descr = ('An MNE project for organizing and formatting MEG and EEG data '
- 'according to the BIDS specification.')
+descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '
+ 'specification and facilitating their analysis with MNE-Python')
DISTNAME = 'mne-bids'
DESCRIPTION = descr
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,8 +14,8 @@\n raise RuntimeError('Could not determine version')\n \n \n-descr = ('An MNE project for organizing and formatting MEG and EEG data '\n- 'according to the BIDS specification.')\n+descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n+ 'specification and facilitating their analysis with MNE-Python')\n \n DISTNAME = 'mne-bids'\n DESCRIPTION = descr\n", "issue": "JOSS publication\nAt the MNE-Sprint in Paris, @teonbrooks @jasmainak and I discussed about writing a short report on MNE-BIDS and publishing it in [JOSS](https://joss.theoj.org/about).\r\n\r\nJOSS articles generally provide a very high level description of the software and its relevance:\r\n\r\n> Your submission should probably be somewhere between 250-1000 words.\r\n\r\nIt would allow us to properly point to MNE-BIDS in citations and get some scholarly recognition for our work.\r\n\r\nI suggest that we take `pybids` as an example and create a [`/paper`](https://github.com/bids-standard/pybids/tree/master/paper) directory in our repository where we prepare the submission.\r\n\r\nPublishing at JOSS would mean that mne-bids stays separate from mne-python instead of being integrated eventually. In a short discussion with @agramfort, we all approved of this idea, because it will allow us to stay with our lightweight and \"independent\" repository, while users can still benefit from mne-bids by using it as a simple \"module\" to MNE-Python.\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('An MNE project for organizing and formatting MEG and EEG data '\n 'according to the BIDS specification.')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids'],\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n 'specification and facilitating their analysis with MNE-Python')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids'],\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}]} | 1,070 | 126 |
gh_patches_debug_24877 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add data-base-url attribute to HTML body tag in Plone 4
Since Plone 4.3.12, the `<base href>` attribute in HTML generated by Plone no longer always points to the context URL as it did prior to the change. This change broke Plone and some add-ons, and more breakage may still surface. Fixes have varied because no alternative was provided when the change was made.
For a lengthy background, see the [discussion](https://community.plone.org/t/how-to-get-context-url-in-js-on-plone-4-3-12/4031).
Rather than rolling back the change, which was done to support some other things and would require reverting them, I suggest providing a future-proof alternative (thanks to @rodfersou for suggesting a new attribute):
Plone 5 has removed `<base href>` completely. Instead, Plone 5 has added a `data-base-url` attribute to the HTML `body` tag, which points to the context URL.
So I suggest the same be done for Plone 4. That way, anything in Plone core and/or add-ons needing the context URL in JavaScript has a future-proof way of getting it from here on.
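For illustration, a minimal sketch of how such an attribute could be emitted on the Plone 4 side; `body_data_base_url` is a hypothetical helper name, not existing Plone API, while `context.absolute_url()` is the usual way to obtain the canonical context URL in Zope/Plone:

```python
# Sketch only: emit the proposed attribute for the body tag.
# body_data_base_url is a hypothetical helper, not shipped Plone code.
def body_data_base_url(context):
    # absolute_url() returns the canonical context URL in Zope/Plone
    return 'data-base-url="%s"' % context.absolute_url()
```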
@@sharing is broken on Page objects in Plone 4.3.12 and 4.3.14
## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)
### What I did:
1. Create a vanilla Plone 4.3.14 site.
2. Add a private Page with the title "Test" at the top of the site.
3. Navigate to the sharing tab for that page.
4. Type some characters into the Search box.
5. Kaboom: exception.
### What I expect to happen:
List of potential users as search results
### What actually happened:
A large Python traceback, because the search form's AJAX request accessed:
http://localhost:8080/Plone2/test/@@sharing/@@updateSharingInfo
rather than the following used by Plone 4.3.11:
http://localhost:8080/Plone2/test/@@updateSharingInfo
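The doubled path falls out of ordinary relative-URL resolution; a quick sketch below shows it, assuming the new base URL carries a trailing slash after the view name, which is consistent with the URL observed above:

```python
from urllib.parse import urljoin  # six.moves.urllib.parse on Plone 4's Python 2

# Base tag at the context URL (plone.app.layout < 2.3.15 behaviour):
urljoin("http://localhost:8080/Plone2/test/", "@@updateSharingInfo")
# -> 'http://localhost:8080/Plone2/test/@@updateSharingInfo'

# Base tag mirroring the actual view URL (>= 2.3.15, trailing slash assumed):
urljoin("http://localhost:8080/Plone2/test/@@sharing/", "@@updateSharingInfo")
# -> 'http://localhost:8080/Plone2/test/@@sharing/@@updateSharingInfo'
```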
The root cause appears to be:
https://pypi.python.org/pypi/plone.app.layout/2.3.17
2.3.15 (2016-06-28)
Fixes:
_Fix base tag differs from actual URL (fixes [86](https://github.com/plone/plone.app.layout/issues/86)). [rodfersou]_
which was actually made **after** plone.app.layout 2.3.15 was released (December 2016); that changelog entry is placed incorrectly in the README file. I'm happy to file a bug report there as well.
### What version of Plone/ Addons I am using:
Vanilla Plone 4.3.14, which uses plone.app.layout 2.3.17. So does Plone 4.3.12, and I see exactly the same problem there.
(NB: Pinning plone.app.layout 2.3.15 in Plone 4.3.14 resolves the problem).
Update: appears to be the same issue discussed in: https://github.com/plone/Products.CMFPlone/issues/2051
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/jsvariables.py`
Content:
```
1 from zope.i18n import translate
2 from zope.publisher.browser import BrowserView
3
4 from Products.CMFCore.utils import getToolByName
5 from Products.CMFPlone import PloneMessageFactory as _
6
7
8 TEMPLATE = """\
9 var portal_url = '%(portal_url)s';
10 var form_modified_message = '%(form_modified)s';
11 var form_resubmit_message = '%(form_resubmit)s';
12 var external_links_open_new_window = '%(open_links)s';
13 var mark_special_links = '%(mark_links)s';
14 var ajax_noresponse_message = '%(ajax_noresponse)s';
15 """
16
17 FORM_MODIFIED = _(u'text_form_modified_message',
18 default=u'Your form has not been saved. All changes you '
19 u'have made will be lost.')
20
21 FORM_RESUBMIT = _(u'text_form_resubmit_message',
22 default=u'You already clicked the submit button. Do you '
23 u'really want to submit this form again?')
24
25 AJAX_NORESPONSE = _(u'text_ajax_noresponse_message',
26 default=u'No response from server. Please try again '
27 u'later.')
28
29
30 class JSVariables(BrowserView):
31
32 def __call__(self, *args, **kwargs):
33 context = self.context
34 response = self.request.response
35 response.setHeader('content-type', 'text/javascript;;charset=utf-8')
36
37 props = getToolByName(context, 'portal_properties').site_properties
38 portal_url = getToolByName(context, 'portal_url')()
39
40 # the following are flags for mark_special_links.js
41 # links get the target="_blank" attribute
42 open_links = props.getProperty('external_links_open_new_window',
43 'false')
44 mark_links = props.getProperty('mark_special_links', 'false')
45
46 form_modified = translate(FORM_MODIFIED, context=self.request)
47 form_resubmit = translate(FORM_RESUBMIT, context=self.request)
48 ajax_noresponse = translate(AJAX_NORESPONSE, context=self.request)
49
50 # escape_for_js
51 form_modified = form_modified.replace("'", "\\'")
52 form_resubmit = form_resubmit.replace("'", "\\'")
53 ajax_noresponse = ajax_noresponse.replace("'", "\\'")
54
55 return TEMPLATE % dict(
56 portal_url=portal_url,
57 open_links=open_links,
58 mark_links=mark_links,
59 form_modified=form_modified,
60 form_resubmit=form_resubmit,
61 ajax_noresponse=ajax_noresponse,
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/jsvariables.py b/Products/CMFPlone/browser/jsvariables.py
--- a/Products/CMFPlone/browser/jsvariables.py
+++ b/Products/CMFPlone/browser/jsvariables.py
@@ -7,6 +7,7 @@
TEMPLATE = """\
var portal_url = '%(portal_url)s';
+var base_url = '%(base_url)s';
var form_modified_message = '%(form_modified)s';
var form_resubmit_message = '%(form_resubmit)s';
var external_links_open_new_window = '%(open_links)s';
@@ -36,6 +37,7 @@
props = getToolByName(context, 'portal_properties').site_properties
portal_url = getToolByName(context, 'portal_url')()
+ base_url = self.request['HTTP_REFERER']
# the following are flags for mark_special_links.js
# links get the target="_blank" attribute
@@ -54,6 +56,7 @@
return TEMPLATE % dict(
portal_url=portal_url,
+ base_url=base_url,
open_links=open_links,
mark_links=mark_links,
form_modified=form_modified,
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/jsvariables.py b/Products/CMFPlone/browser/jsvariables.py\n--- a/Products/CMFPlone/browser/jsvariables.py\n+++ b/Products/CMFPlone/browser/jsvariables.py\n@@ -7,6 +7,7 @@\n \n TEMPLATE = \"\"\"\\\n var portal_url = '%(portal_url)s';\n+var base_url = '%(base_url)s';\n var form_modified_message = '%(form_modified)s';\n var form_resubmit_message = '%(form_resubmit)s';\n var external_links_open_new_window = '%(open_links)s';\n@@ -36,6 +37,7 @@\n \n props = getToolByName(context, 'portal_properties').site_properties\n portal_url = getToolByName(context, 'portal_url')()\n+ base_url = self.request['HTTP_REFERER']\n \n # the following are flags for mark_special_links.js\n # links get the target=\"_blank\" attribute\n@@ -54,6 +56,7 @@\n \n return TEMPLATE % dict(\n portal_url=portal_url,\n+ base_url=base_url,\n open_links=open_links,\n mark_links=mark_links,\n form_modified=form_modified,\n", "issue": "add data-base-url attribute to HTML body tag in Plone 4\nSince plone 4.3.12, the `<base href` attribute in HTML generated by Plone no longer always points to the context URL as it used to prior to the change. This change broke Plone and some add-ons. More breakage may still surface. Fixes have varied because no alternative was provided when the change was made.\r\n\r\nFor a lengthy background, see the [discussion](https://community.plone.org/t/how-to-get-context-url-in-js-on-plone-4-3-12/4031). \r\n\r\nRather than rolling back the change which was done to support some other things and would require reverting them, I suggest providing a future-proof alternative (thanks @rodfersou for suggesting using a new attribute):\r\n\r\nPlone 5 has removed `<base href` completely. Instead Plone 5 has added a `data-base-url` attribute to the HTML `body` tag. Which points to the context URL.\r\n\r\nSo, I suggest same be done for Plone 4. That way, anything in Plone core and/or add-ons needing context URL in Javascript have a future-proof way of getting it from here on.\r\n\r\n\n@@sharing is broken on Page objects in Plone 4.3.12 and 4.3.14\n## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)\r\n\r\n### What I did:\r\n\r\n1. Create vanilla Plone 4.3.14 site.\r\n2. Add private Page with title Test at the top of the site\r\n3. Navigate to sharing tab for that page\r\n4. Type some characters into the Search box\r\n5. Kaboom: exception\r\n\r\n### What I expect to happen:\r\n\r\nList of potential users as search results\r\n\r\n### What actually happened:\r\n\r\nLarge python back trace because the search form AJAX accessed:\r\n\r\n http://localhost:8080/Plone2/test/@@sharing/@@updateSharingInfo\r\n\r\nrather than the following used by Plone 4.3.11:\r\n\r\n http://localhost:8080/Plone2/test/@@updateSharingInfo\r\n \r\nThe root cause appears to be:\r\n\r\nhttps://pypi.python.org/pypi/plone.app.layout/2.3.17\r\n\r\n 2.3.15 (2016-06-28)\r\n Fixes:\r\n _Fix base tag differs from actual URL (fixes [86](https://github.com/plone/plone.app.layout/issues/86)). [rodfersou]_\r\n\r\nwhich was actually made **after** plone.app.layout 2.3.15 was released, (December 2016): that comment is placed incorrectly in the README file. I'm happy to make a bug report there as well.\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nVanilla Plone 4.3.14, which uses plone.app.layout 2.3.17. 
So does Plone 4.3.12, and I see exactly the same problem there.\r\n\r\n(NB: Pinning plone.app.layout 2.3.15 in Plone 4.3.14 resolves the problem).\r\n\r\nUpdate: appears to be the same issue discussed in: https://github.com/plone/Products.CMFPlone/issues/2051\r\n\n", "before_files": [{"content": "from zope.i18n import translate\nfrom zope.publisher.browser import BrowserView\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\n\n\nTEMPLATE = \"\"\"\\\nvar portal_url = '%(portal_url)s';\nvar form_modified_message = '%(form_modified)s';\nvar form_resubmit_message = '%(form_resubmit)s';\nvar external_links_open_new_window = '%(open_links)s';\nvar mark_special_links = '%(mark_links)s';\nvar ajax_noresponse_message = '%(ajax_noresponse)s';\n\"\"\"\n\nFORM_MODIFIED = _(u'text_form_modified_message',\n default=u'Your form has not been saved. All changes you '\n u'have made will be lost.')\n\nFORM_RESUBMIT = _(u'text_form_resubmit_message',\n default=u'You already clicked the submit button. Do you '\n u'really want to submit this form again?')\n\nAJAX_NORESPONSE = _(u'text_ajax_noresponse_message',\n default=u'No response from server. Please try again '\n u'later.')\n\n\nclass JSVariables(BrowserView):\n\n def __call__(self, *args, **kwargs):\n context = self.context\n response = self.request.response\n response.setHeader('content-type', 'text/javascript;;charset=utf-8')\n\n props = getToolByName(context, 'portal_properties').site_properties\n portal_url = getToolByName(context, 'portal_url')()\n\n # the following are flags for mark_special_links.js\n # links get the target=\"_blank\" attribute\n open_links = props.getProperty('external_links_open_new_window',\n 'false')\n mark_links = props.getProperty('mark_special_links', 'false')\n\n form_modified = translate(FORM_MODIFIED, context=self.request)\n form_resubmit = translate(FORM_RESUBMIT, context=self.request)\n ajax_noresponse = translate(AJAX_NORESPONSE, context=self.request)\n\n # escape_for_js\n form_modified = form_modified.replace(\"'\", \"\\\\'\")\n form_resubmit = form_resubmit.replace(\"'\", \"\\\\'\")\n ajax_noresponse = ajax_noresponse.replace(\"'\", \"\\\\'\")\n\n return TEMPLATE % dict(\n portal_url=portal_url,\n open_links=open_links,\n mark_links=mark_links,\n form_modified=form_modified,\n form_resubmit=form_resubmit,\n ajax_noresponse=ajax_noresponse,\n )\n", "path": "Products/CMFPlone/browser/jsvariables.py"}], "after_files": [{"content": "from zope.i18n import translate\nfrom zope.publisher.browser import BrowserView\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\n\n\nTEMPLATE = \"\"\"\\\nvar portal_url = '%(portal_url)s';\nvar base_url = '%(base_url)s';\nvar form_modified_message = '%(form_modified)s';\nvar form_resubmit_message = '%(form_resubmit)s';\nvar external_links_open_new_window = '%(open_links)s';\nvar mark_special_links = '%(mark_links)s';\nvar ajax_noresponse_message = '%(ajax_noresponse)s';\n\"\"\"\n\nFORM_MODIFIED = _(u'text_form_modified_message',\n default=u'Your form has not been saved. All changes you '\n u'have made will be lost.')\n\nFORM_RESUBMIT = _(u'text_form_resubmit_message',\n default=u'You already clicked the submit button. Do you '\n u'really want to submit this form again?')\n\nAJAX_NORESPONSE = _(u'text_ajax_noresponse_message',\n default=u'No response from server. 
Please try again '\n u'later.')\n\n\nclass JSVariables(BrowserView):\n\n def __call__(self, *args, **kwargs):\n context = self.context\n response = self.request.response\n response.setHeader('content-type', 'text/javascript;;charset=utf-8')\n\n props = getToolByName(context, 'portal_properties').site_properties\n portal_url = getToolByName(context, 'portal_url')()\n base_url = self.request['HTTP_REFERER']\n\n # the following are flags for mark_special_links.js\n # links get the target=\"_blank\" attribute\n open_links = props.getProperty('external_links_open_new_window',\n 'false')\n mark_links = props.getProperty('mark_special_links', 'false')\n\n form_modified = translate(FORM_MODIFIED, context=self.request)\n form_resubmit = translate(FORM_RESUBMIT, context=self.request)\n ajax_noresponse = translate(AJAX_NORESPONSE, context=self.request)\n\n # escape_for_js\n form_modified = form_modified.replace(\"'\", \"\\\\'\")\n form_resubmit = form_resubmit.replace(\"'\", \"\\\\'\")\n ajax_noresponse = ajax_noresponse.replace(\"'\", \"\\\\'\")\n\n return TEMPLATE % dict(\n portal_url=portal_url,\n base_url=base_url,\n open_links=open_links,\n mark_links=mark_links,\n form_modified=form_modified,\n form_resubmit=form_resubmit,\n ajax_noresponse=ajax_noresponse,\n )\n", "path": "Products/CMFPlone/browser/jsvariables.py"}]} | 1,609 | 261 |
gh_patches_debug_19644 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`
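A minimal sketch of the two idioms side by side:

```python
from __future__ import division  # must precede other statements in a Python 2 module

x, y = 7, 2
print(x / y)   # 3.5 -- true division, replacing the 1.*x/y workaround
print(x // y)  # 3   -- floor division remains available explicitly
```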
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py`
Content:
```
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMin(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.min"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).min()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.min(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.min(F.length(F.col(column)))
32
```
Path: `great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py`
Content:
```
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMax(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.max"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).max()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.max(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.max(F.length(F.col(column)))
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
@@ -27,5 +27,5 @@
return sa.func.max(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.max(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.max(F.length(column))
diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
@@ -27,5 +27,5 @@
return sa.func.min(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.min(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.min(F.length(column))
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n@@ -27,5 +27,5 @@\n return sa.func.max(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.max(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.max(F.length(column))\ndiff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n@@ -27,5 +27,5 @@\n return sa.func.min(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.min(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.min(F.length(column))\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMin(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.min\"\n\n @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).min()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.min(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.min(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py"}, {"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMax(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.max\"\n\n 
@column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).max()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.max(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.max(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py"}], "after_files": [{"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMin(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.min\"\n\n @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).min()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.min(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.min(F.length(column))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py"}, {"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMax(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.max\"\n\n @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).max()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.max(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.max(F.length(column))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py"}]} | 968 | 396 |
gh_patches_debug_2335 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update `test` dependency from `nteract-scrapbook` to `scrapbook`
### Summary
Running the notebook tests generates the warning
```pytb
warnings.warn("'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.", FutureWarning)
```
as [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `setup.py` for the `test` extra:
https://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42
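The change amounts to swapping one requirement string; a minimal sketch follows, where the exact version pins are illustrative assumptions rather than values taken from the repository:

```python
extras_require = {}
extras_require['test'] = [
    'papermill~=2.0',
    'scrapbook~=0.5',  # was: 'nteract-scrapbook~=0.2'
]
```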
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789
7 'tensorflow-probability>=0.11.0', # c.f. PR #1657
8 ],
9 'torch': ['torch>=1.10.0'], # c.f. PR #1657
10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501
11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567
12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'scikit-hep-testdata>=0.4.11',
33 'pytest>=6.0',
34 'pytest-cov>=2.5.1',
35 'pytest-mock',
36 'requests-mock>=1.9.0',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'pytest-mpl',
40 'pydocstyle',
41 'papermill~=2.0',
42 'nteract-scrapbook~=0.2',
43 'jupyter',
44 'graphviz',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 set(
50 extras_require['xmlio']
51 + extras_require['contrib']
52 + [
53 'sphinx>=4.0.0',
54 'sphinxcontrib-bibtex~=2.1',
55 'sphinx-click',
56 'sphinx_rtd_theme',
57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
58 'ipywidgets',
59 'sphinx-issues',
60 'sphinx-copybutton>=0.3.2',
61 'sphinx-togglebutton>=0.3.0',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['lint']
69 + extras_require['test']
70 + [
71 'nbdime',
72 'tbump>=6.7.0',
73 'ipython',
74 'pre-commit',
75 'check-manifest',
76 'codemetapy>=0.3.4',
77 'twine',
78 ]
79 )
80 )
81 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
82
83
84 setup(
85 extras_require=extras_require,
86 use_scm_version=lambda: {'local_scheme': lambda version: ''},
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,8 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'papermill~=2.0',
- 'nteract-scrapbook~=0.2',
+ 'papermill~=2.3.4',
+ 'scrapbook~=0.5.0',
'jupyter',
'graphviz',
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,8 +38,8 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'papermill~=2.0',\n- 'nteract-scrapbook~=0.2',\n+ 'papermill~=2.3.4',\n+ 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n ]\n", "issue": "Update `test` dependency from `nteract-scrapbook` to `scrapbook`\n### Summary\n\nRunning the notebook tests generates the warning\r\n\r\n```pytb\r\nwarnings.warn(\"'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.\", FutureWarning)\r\n```\r\n\r\nas [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `steup.py` for the `test` extra:\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42\n\n### Additional Information\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. 
Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.3.4',\n 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,366 | 111 |
gh_patches_debug_60747 | rasdani/github-patches | git_diff | hi-primus__optimus-872 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Json file exploration/profiling
Unstructured data such as JSON cannot be explored like regular tabular data. I have been exploring the use of tree depth and node counts to highlight to the user which nodes could contain important data.

Some work in progress here: https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py
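A hypothetical usage sketch of that explorer, based on the `JSON` class shown in the files below; the json path `"results.items"` is an illustrative placeholder, not a real dataset key:

```python
from optimus.engines.pandas.io.json import JSON

explorer = JSON()
explorer.load("data/corona.json")
print(explorer.freq(n=10))                # densest dict/list nodes, by count and depth
df = explorer.to_pandas("results.items")  # flatten one json path into a DataFrame
```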
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimus/engines/pandas/io/json.py`
Content:
```
1 import glob
2
3 import pandas as pd
4 import ujson
5 from glom import glom
6
7 from optimus.infer import is_dict, is_list, is_str, is_int
8
9 META = "_meta"
10 PROPERTIES = "_properties"
11 ITEMS = "_items"
12
13 COL_DEPTH = "depth"
14
15
16 class JSON:
17 def __init__(self):
18 self.data = None
19
20 def load(self, path):
21 """
22 Load a file in JSON format
23 :param path:
24 :return:
25 """
26 all_json = glob.glob(path, recursive=True)
27 # pd.read_json("data/corona.json")
28 with open(all_json[0]) as f:
29 self.data = ujson.load(f)
30
31 def schema(self):
32 """
33 Return a JSON with the count, dtype and nested structure
34 :return:
35 """
36
37 def _schema(_data, _keys):
38 if isinstance(_data, dict):
39 for x, y in _data.items():
40 if is_dict(y):
41 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
42 if len(y) > 0:
43 _keys[x][PROPERTIES] = {}
44 _schema(y, _keys[x][PROPERTIES])
45 elif is_list(y):
46 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
47 if len(y) > 0:
48 _keys[x] = {ITEMS: {PROPERTIES: {}, META: {"count": len(y), "dtype": type(y)}}}
49 _schema(y, _keys[x][ITEMS][PROPERTIES])
50 elif is_str(y):
51 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
52 _schema(y, _keys[x])
53 elif is_int(y):
54 _keys[x] = {META: {"dtype": type(y)}}
55 _schema(y, _keys[x])
56
57 elif is_list(_data):
58 for x in _data:
59 _schema(x, _keys)
60
61 keys = {}
62 _schema(self.data, keys)
63 return keys
64
65 def freq(self, n=100):
66 """
67 Calculate the count on every dict or list in the json
68 :param n:
69 :return:
70 """
71
72 def _profile(keys, parent, result=None):
73 for key, values in keys.items():
74 if values.get(PROPERTIES):
75 _meta = values.get(META)
76 _properties = values.get(PROPERTIES)
77 elif values.get(ITEMS):
78 _meta = values.get(ITEMS).get(META)
79 _properties = values.get(ITEMS).get(PROPERTIES)
80
81 if values.get(PROPERTIES) or values.get(ITEMS):
82 result.append([key, _meta["count"], _meta["dtype"], parent, len(parent)])
83 _profile(_properties, parent + [key], result=result)
84
85 data = []
86 _profile(self.schema(), [], data)
87 df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])
88 df = df.sort_values(by=["count", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')
89 return df
90
91 def flatten(self, path):
92 """
93 Flatten a JSON from a json path
94 :param path:
95 :return:
96 """
97
98 def _flatten_json(_values):
99 out = {}
100
101 def flatten(x, name=''):
102 if type(x) is dict:
103 for a in x:
104 flatten(x[a], name + a + '_')
105 elif type(x) is list:
106 # i = 0
107 for a in x:
108 # flatten(a, name + str(i) + '_')
109 flatten(a, name + '_')
110 # i += 1
111 else:
112 out[name[:-1]] = x
113
114 flatten(_values)
115 return out
116
117 result = []
118 value = glom(self.data, path, skip_exc=KeyError)
119 if is_list(value):
120 for i in value:
121 result.append((_flatten_json(i)))
122 elif is_dict(value):
123 for i, j in value.items():
124 a = {"col": i}
125 a.update(_flatten_json(j))
126 result.append(a)
127 return result
128
129 def to_pandas(self, path):
130 result = self.flatten(path)
131 return pd.DataFrame(data=result)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py
--- a/optimus/engines/pandas/io/json.py
+++ b/optimus/engines/pandas/io/json.py
@@ -121,7 +121,7 @@
result.append((_flatten_json(i)))
elif is_dict(value):
for i, j in value.items():
- a = {"col": i}
+ a = {path: i}
a.update(_flatten_json(j))
result.append(a)
return result
| {"golden_diff": "diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py\n--- a/optimus/engines/pandas/io/json.py\n+++ b/optimus/engines/pandas/io/json.py\n@@ -121,7 +121,7 @@\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n- a = {\"col\": i}\n+ a = {path: i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n", "issue": "Json file exploration/profiling\nUnstructured data as JSON can not be explored as regular tabular data. I have been exploring using tree depth and count to highlight the user in which nodes could have important data.\r\n\r\nSome work in progress, here. https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py\n", "before_files": [{"content": "import glob\n\nimport pandas as pd\nimport ujson\nfrom glom import glom\n\nfrom optimus.infer import is_dict, is_list, is_str, is_int\n\nMETA = \"_meta\"\nPROPERTIES = \"_properties\"\nITEMS = \"_items\"\n\nCOL_DEPTH = \"depth\"\n\n\nclass JSON:\n def __init__(self):\n self.data = None\n\n def load(self, path):\n \"\"\"\n Load a file in JSON format\n :param path:\n :return:\n \"\"\"\n all_json = glob.glob(path, recursive=True)\n # pd.read_json(\"data/corona.json\")\n with open(all_json[0]) as f:\n self.data = ujson.load(f)\n\n def schema(self):\n \"\"\"\n Return a JSON with the count, dtype and nested structure\n :return:\n \"\"\"\n\n def _schema(_data, _keys):\n if isinstance(_data, dict):\n for x, y in _data.items():\n if is_dict(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x][PROPERTIES] = {}\n _schema(y, _keys[x][PROPERTIES])\n elif is_list(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x] = {ITEMS: {PROPERTIES: {}, META: {\"count\": len(y), \"dtype\": type(y)}}}\n _schema(y, _keys[x][ITEMS][PROPERTIES])\n elif is_str(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n _schema(y, _keys[x])\n elif is_int(y):\n _keys[x] = {META: {\"dtype\": type(y)}}\n _schema(y, _keys[x])\n\n elif is_list(_data):\n for x in _data:\n _schema(x, _keys)\n\n keys = {}\n _schema(self.data, keys)\n return keys\n\n def freq(self, n=100):\n \"\"\"\n Calculate the count on every dict or list in the json\n :param n:\n :return:\n \"\"\"\n\n def _profile(keys, parent, result=None):\n for key, values in keys.items():\n if values.get(PROPERTIES):\n _meta = values.get(META)\n _properties = values.get(PROPERTIES)\n elif values.get(ITEMS):\n _meta = values.get(ITEMS).get(META)\n _properties = values.get(ITEMS).get(PROPERTIES)\n\n if values.get(PROPERTIES) or values.get(ITEMS):\n result.append([key, _meta[\"count\"], _meta[\"dtype\"], parent, len(parent)])\n _profile(_properties, parent + [key], result=result)\n\n data = []\n _profile(self.schema(), [], data)\n df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])\n df = df.sort_values(by=[\"count\", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')\n return df\n\n def flatten(self, path):\n \"\"\"\n Flatten a JSON from a json path\n :param path:\n :return:\n \"\"\"\n\n def _flatten_json(_values):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n # i = 0\n for a in x:\n # flatten(a, name + str(i) + '_')\n flatten(a, name + '_')\n # i += 1\n else:\n out[name[:-1]] = x\n\n flatten(_values)\n return out\n\n result = []\n value = glom(self.data, path, skip_exc=KeyError)\n if 
is_list(value):\n for i in value:\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n a = {\"col\": i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n\n def to_pandas(self, path):\n result = self.flatten(path)\n return pd.DataFrame(data=result)\n", "path": "optimus/engines/pandas/io/json.py"}], "after_files": [{"content": "import glob\n\nimport pandas as pd\nimport ujson\nfrom glom import glom\n\nfrom optimus.infer import is_dict, is_list, is_str, is_int\n\nMETA = \"_meta\"\nPROPERTIES = \"_properties\"\nITEMS = \"_items\"\n\nCOL_DEPTH = \"depth\"\n\n\nclass JSON:\n def __init__(self):\n self.data = None\n\n def load(self, path):\n \"\"\"\n Load a file in JSON format\n :param path:\n :return:\n \"\"\"\n all_json = glob.glob(path, recursive=True)\n # pd.read_json(\"data/corona.json\")\n with open(all_json[0]) as f:\n self.data = ujson.load(f)\n\n def schema(self):\n \"\"\"\n Return a JSON with the count, dtype and nested structure\n :return:\n \"\"\"\n\n def _schema(_data, _keys):\n if isinstance(_data, dict):\n for x, y in _data.items():\n if is_dict(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x][PROPERTIES] = {}\n _schema(y, _keys[x][PROPERTIES])\n elif is_list(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x] = {ITEMS: {PROPERTIES: {}, META: {\"count\": len(y), \"dtype\": type(y)}}}\n _schema(y, _keys[x][ITEMS][PROPERTIES])\n elif is_str(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n _schema(y, _keys[x])\n elif is_int(y):\n _keys[x] = {META: {\"dtype\": type(y)}}\n _schema(y, _keys[x])\n\n elif is_list(_data):\n for x in _data:\n _schema(x, _keys)\n\n keys = {}\n _schema(self.data, keys)\n return keys\n\n def freq(self, n=100):\n \"\"\"\n Calculate the count on every dict or list in the json\n :param n:\n :return:\n \"\"\"\n\n def _profile(keys, parent, result=None):\n for key, values in keys.items():\n if values.get(PROPERTIES):\n _meta = values.get(META)\n _properties = values.get(PROPERTIES)\n elif values.get(ITEMS):\n _meta = values.get(ITEMS).get(META)\n _properties = values.get(ITEMS).get(PROPERTIES)\n\n if values.get(PROPERTIES) or values.get(ITEMS):\n result.append([key, _meta[\"count\"], _meta[\"dtype\"], parent, len(parent)])\n _profile(_properties, parent + [key], result=result)\n\n data = []\n _profile(self.schema(), [], data)\n df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])\n df = df.sort_values(by=[\"count\", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')\n return df\n\n def flatten(self, path):\n \"\"\"\n Flatten a JSON from a json path\n :param path:\n :return:\n \"\"\"\n\n def _flatten_json(_values):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n # i = 0\n for a in x:\n # flatten(a, name + str(i) + '_')\n flatten(a, name + '_')\n # i += 1\n else:\n out[name[:-1]] = x\n\n flatten(_values)\n return out\n\n result = []\n value = glom(self.data, path, skip_exc=KeyError)\n if is_list(value):\n for i in value:\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n a = {path: i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n\n def to_pandas(self, path):\n result = self.flatten(path)\n return pd.DataFrame(data=result)\n", "path": "optimus/engines/pandas/io/json.py"}]} | 1,584 | 128 |
gh_patches_debug_8107 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transparent mode fails with a lookup failure.
##### Steps to reproduce the problem:
1. Launch a Wi-Fi access point (OS X).
2. Set up the pfctl configuration so that HTTP packets are forwarded.
3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` ).
4. Access a web page after connecting to the AP launched above.
5. Check the event log.
##### Any other comments? What have you tried so far?
When I tried to use transparent mode on OS X (10.11.6), `RuntimeError("Could not resolve original destination.")` was raised.
I investigated this bug and found that it is caused by the difference between AF_INET's and AF_INET6's peername formats.
https://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567
If we use AF_INET, getpeername() returns a string like `"192.168.2.5:45670"`.
But if we use AF_INET6, getpeername() returns a string like `"::ffff:192.168.2.5:45670"`.
The output of `pfctl -s state` looks like this:
`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`
As you can see, the `::ffff:` prefix does not appear there.

So the [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises a `RuntimeError` because the `spec in i` condition never becomes true.
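A minimal sketch of the mismatch, plus one way to strip the IPv4-mapped prefix before matching; the regex here is one possible normalization, not necessarily what mitmproxy ships:

```python
import re

peer = "::ffff:192.168.2.5"  # AF_INET6 form of getpeername()
state = "ALL tcp 192.168.2.5:45670 -> x.x.x.x:33291 -> x.x.x.x:443 ESTABLISHED:ESTABLISHED"

print("%s:%s" % (peer, 45670) in state)  # False -- lookup() never matches

ipv4 = re.sub(r"^::ffff:(?=\d+\.\d+\.\d+\.\d+$)", "", peer)
print("%s:%s" % (ipv4, 45670) in state)  # True once the mapped prefix is stripped
```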
##### System information
Mitmproxy version: 3.0.0 (release version)
Python version: 3.6.2
Platform: Darwin-15.6.0-x86_64-i386-64bit
SSL version: OpenSSL 1.0.2l 25 May 2017
Mac version: 10.11.6 ('', '', '') x86_64
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/platform/pf.py`
Content:
```
1 import sys
2
3
4 def lookup(address, port, s):
5 """
6 Parse the pfctl state output s, to look up the destination host
7 matching the client (address, port).
8
9 Returns an (address, port) tuple, or None.
10 """
11 s = s.decode()
12 spec = "%s:%s" % (address, port)
13 for i in s.split("\n"):
14 if "ESTABLISHED:ESTABLISHED" in i and spec in i:
15 s = i.split()
16 if len(s) > 4:
17 if sys.platform.startswith("freebsd"):
18 # strip parentheses for FreeBSD pfctl
19 s = s[3][1:-1].split(":")
20 else:
21 s = s[4].split(":")
22
23 if len(s) == 2:
24 return s[0], int(s[1])
25 raise RuntimeError("Could not resolve original destination.")
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py
--- a/mitmproxy/platform/pf.py
+++ b/mitmproxy/platform/pf.py
@@ -1,3 +1,4 @@
+import re
import sys
@@ -8,6 +9,9 @@
Returns an (address, port) tuple, or None.
"""
+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
+ # Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
+ address = re.sub("^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
spec = "%s:%s" % (address, port)
for i in s.split("\n"):
| {"golden_diff": "diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py\n--- a/mitmproxy/platform/pf.py\n+++ b/mitmproxy/platform/pf.py\n@@ -1,3 +1,4 @@\n+import re\n import sys\n \n \n@@ -8,6 +9,9 @@\n \n Returns an (address, port) tuple, or None.\n \"\"\"\n+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.\n+ # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n+ address = re.sub(\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n", "issue": "Transparent mode fail with looking up failure.\n##### Steps to reproduce the problem:\r\n\r\n1. Launch Wifi Access Point(OS X)\r\n2. Setup pfctl configuration so that http packet will be forwarded.\r\n3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` )\r\n4. Access web page after connecting to AP which launched before.\r\n5. See event log.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nWhen I tried to use transparent mode with OS X(10.11.6).\r\nRuntimeError(\"Could not resolve original destination.\") raised.\r\n\r\nI investigated this bug.\r\nAnd I found that this is caused by difference between AF_INET's and AF_INET6's peername.\r\nhttps://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567\r\n\r\nIf we use AF_INET, getpeername() return string like `\"192.168.2.5:45670\"`.\r\nBut if we use AF_INET6, getpeername() return string like `\"::ffff:192.168.2.5:45670\"`.\r\n\r\n`pfctl -s state` 's result is like below.\r\n`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`\r\n\r\nAs you see, `::ffff:` doesn't exist.\r\n\r\nSo [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises RuntimeError() because `spec in i` condition won't become true.\r\n\r\n##### System information\r\n\r\nMitmproxy version: 3.0.0 (release version)\r\nPython version: 3.6.2\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2l 25 May 2017\r\nMac version: 10.11.6 ('', '', '') x86_64\n", "before_files": [{"content": "import sys\n\n\ndef lookup(address, port, s):\n \"\"\"\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n \"\"\"\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and spec in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n raise RuntimeError(\"Could not resolve original destination.\")\n", "path": "mitmproxy/platform/pf.py"}], "after_files": [{"content": "import re\nimport sys\n\n\ndef lookup(address, port, s):\n \"\"\"\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n \"\"\"\n # We may get an ipv4-mapped ipv6 address here, e.g. 
::ffff:127.0.0.1.\n # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n address = re.sub(\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and spec in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n raise RuntimeError(\"Could not resolve original destination.\")\n", "path": "mitmproxy/platform/pf.py"}]} | 1,034 | 202 |
gh_patches_debug_40372 | rasdani/github-patches | git_diff | wright-group__WrightTools-1044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
from_solis: import fails when no metadata
It is possible to export asc image from Solis software without metadata (clearly this is not preferred, but it should be handled gracefully).
from_solis assumes "Data and Time" field exists in metadata (for attrs timestamp). Fall back on file creation date as an alternative.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/data/_solis.py`
Content:
```
1 """Andor."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import os
8 import pathlib
9 import time
10
11 import numpy as np
12
13 from ._data import Data
14 from .. import exceptions as wt_exceptions
15 from ..kit import _timestamp as timestamp
16
17
18 # --- define --------------------------------------------------------------------------------------
19
20
21 __all__ = ["from_Solis"]
22
23
24 # --- from function -------------------------------------------------------------------------------
25
26
27 def from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:
28 """Create a data object from Andor Solis software (ascii exports).
29
30 Parameters
31 ----------
32 filepath : path-like
33 Path to file (should be .asc format).
34 Can be either a local or remote file (http/ftp).
35 Can be compressed with gz/bz2, decompression based on file name.
36 name : string (optional)
37 Name to give to the created data object. If None, filename is used.
38 Default is None.
39 parent : WrightTools.Collection (optional)
40 Collection to place new data object within. Default is None.
41 verbose : boolean (optional)
42 Toggle talkback. Default is True.
43
44 Returns
45 -------
46 data
47 New data object.
48 """
49 # parse filepath
50 filestr = os.fspath(filepath)
51 filepath = pathlib.Path(filepath)
52
53 if not ".asc" in filepath.suffixes:
54 wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
55 # parse name
56 if not name:
57 name = filepath.name.split(".")[0]
58 # create data
59 ds = np.DataSource(None)
60 f = ds.open(filestr, "rt")
61 axis0 = []
62 arr = []
63 attrs = {}
64
65 line0 = f.readline().strip()[:-1]
66 line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma
67 axis0.append(line0.pop(0))
68 arr.append(line0)
69
70 def get_frames(f, arr, axis0):
71 axis0_written = False
72 while True:
73 line = f.readline().strip()[:-1]
74 if len(line) == 0:
75 break
76 else:
77 line = [float(x) for x in line.split(",")]
78 # signature of new frames is restart of axis0
79 if not axis0_written and (line[0] == axis0[0]):
80 axis0_written = True
81 if axis0_written:
82 line.pop(0)
83 else:
84 axis0.append(line.pop(0))
85 arr.append(line)
86 return arr, axis0
87
88 arr, axis0 = get_frames(f, arr, axis0)
89 nframes = len(arr) // len(axis0)
90
91 i = 0
92 while i < 3:
93 line = f.readline().strip()
94 if len(line) == 0:
95 i += 1
96 else:
97 try:
98 key, val = line.split(":", 1)
99 except ValueError:
100 pass
101 else:
102 attrs[key.strip()] = val.strip()
103
104 f.close()
105 created = attrs["Date and Time"] # is this UTC?
106 created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
107 created = timestamp.TimeStamp(time.mktime(created)).RFC3339
108
109 kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
110 if parent is None:
111 data = Data(**kwargs)
112 else:
113 data = parent.create_data(**kwargs)
114
115 axis0 = np.array(axis0)
116 if float(attrs["Grating Groove Density (l/mm)"]) == 0:
117 xname = "xindex"
118 xunits = None
119 else:
120 xname = "wm"
121 xunits = "nm"
122 axes = [xname, "yindex"]
123
124 if nframes == 1:
125 arr = np.array(arr)
126 data.create_variable(name=xname, values=axis0[:, None], units=xunits)
127 data.create_variable(name="yindex", values=np.arange(arr.shape[-1])[None, :], units=None)
128 else:
129 arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))
130 data.create_variable(name="frame", values=np.arange(nframes)[:, None, None], units=None)
131 data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)
132 data.create_variable(
133 name="yindex", values=np.arange(arr.shape[-1])[None, None, :], units=None
134 )
135 axes = ["frame"] + axes
136
137 data.transform(*axes)
138 arr /= float(attrs["Exposure Time (secs)"])
139 # signal has units of Hz because time normalized
140 data.create_channel(name="signal", values=arr, signed=False, units="Hz")
141
142 for key, val in attrs.items():
143 data.attrs[key] = val
144
145 # finish
146 if verbose:
147 print("data created at {0}".format(data.fullpath))
148 print(" axes: {0}".format(data.axis_names))
149 print(" shape: {0}".format(data.shape))
150 return data
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/data/_solis.py b/WrightTools/data/_solis.py
--- a/WrightTools/data/_solis.py
+++ b/WrightTools/data/_solis.py
@@ -7,6 +7,7 @@
import os
import pathlib
import time
+import warnings
import numpy as np
@@ -43,8 +44,20 @@
Returns
-------
- data
+ data : WrightTools.Data
New data object.
+ Channels: `signal`. If exposure time is in metadata, signal is given as a count rate (Hz).
+ Variables, Axes: `yindex` and `xindex` (no grating) or `wm` (grating)
+
+ Notes
+ -----
+ When exporting as ascii, including metadata is optional.
+ It is _strongly recommended_ that you include metadata in exports.
+ Metadata informs the image creation date, exposure time, and axes.
+ However, if metadata is not present, this importer will make its best guesses to populate these fields accurately.
+
+ Saving processed data (e.g. vertically-binned data) in Solis software can remove/omit important metadata, so we advise exporting the raw camera images.
+
"""
# parse filepath
filestr = os.fspath(filepath)
@@ -102,9 +115,17 @@
attrs[key.strip()] = val.strip()
f.close()
- created = attrs["Date and Time"] # is this UTC?
- created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
- created = timestamp.TimeStamp(time.mktime(created)).RFC3339
+
+ try:
+ created = attrs["Date and Time"] # is this UTC?
+ created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
+ created = timestamp.TimeStamp(time.mktime(created)).RFC3339
+ except KeyError: # use file creation time
+ created = os.stat(filepath).st_mtime
+ created = timestamp.TimeStamp(created).RFC3339
+ warnings.warn(
+ f"{filepath.name} has no 'Date and Time' field: using file modified time instead: {created}"
+ )
kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
if parent is None:
@@ -113,7 +134,15 @@
data = parent.create_data(**kwargs)
axis0 = np.array(axis0)
- if float(attrs["Grating Groove Density (l/mm)"]) == 0:
+ try:
+ groove_density = float(attrs["Grating Groove Density (l/mm)"])
+ except KeyError: # assume no grating
+ warnings.warn(
+ f"{filepath.name} has no 'Grating Groove Density (1/mm)' field: guessing x axis units."
+ )
+ groove_density = isinstance(axis0[0], float)
+
+ if groove_density == 0:
xname = "xindex"
xunits = None
else:
@@ -135,9 +164,17 @@
axes = ["frame"] + axes
data.transform(*axes)
- arr /= float(attrs["Exposure Time (secs)"])
- # signal has units of Hz because time normalized
- data.create_channel(name="signal", values=arr, signed=False, units="Hz")
+ try:
+ exposure_time = float(attrs["Exposure Time (secs)"])
+ if exposure_time == 0:
+ raise ZeroDivisionError
+ arr /= exposure_time
+ except (KeyError, ZeroDivisionError) as e: # do not normalize
+ warnings.warn(f"{filepath.name} camera signal cannot be given as a count rate.")
+ data.create_channel(name="signal", values=arr, signed=False)
+ else:
+ # signal has units of Hz because time normalized
+ data.create_channel(name="signal", values=arr, signed=False, units="Hz")
for key, val in attrs.items():
data.attrs[key] = val
| {"golden_diff": "diff --git a/WrightTools/data/_solis.py b/WrightTools/data/_solis.py\n--- a/WrightTools/data/_solis.py\n+++ b/WrightTools/data/_solis.py\n@@ -7,6 +7,7 @@\n import os\n import pathlib\n import time\n+import warnings\n \n import numpy as np\n \n@@ -43,8 +44,20 @@\n \n Returns\n -------\n- data\n+ data : WrightTools.Data\n New data object.\n+ Channels: `signal`. If exposure time is in metadata, signal is given as a count rate (Hz).\n+ Variables, Axes: `yindex` and `xindex` (no grating) or `wm` (grating)\n+\n+ Notes\n+ -----\n+ When exporting as ascii, including metadata is optional.\n+ It is _strongly recommended_ that you include metadata in exports.\n+ Metadata informs the image creation date, exposure time, and axes.\n+ However, if metadata is not present, this importer will make its best guesses to populate these fields accurately.\n+\n+ Saving processed data (e.g. vertically-binned data) in Solis software can remove/omit important metadata, so we advise exporting the raw camera images.\n+\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n@@ -102,9 +115,17 @@\n attrs[key.strip()] = val.strip()\n \n f.close()\n- created = attrs[\"Date and Time\"] # is this UTC?\n- created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n- created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n+\n+ try:\n+ created = attrs[\"Date and Time\"] # is this UTC?\n+ created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n+ created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n+ except KeyError: # use file creation time\n+ created = os.stat(filepath).st_mtime\n+ created = timestamp.TimeStamp(created).RFC3339\n+ warnings.warn(\n+ f\"{filepath.name} has no 'Date and Time' field: using file modified time instead: {created}\"\n+ )\n \n kwargs = {\"name\": name, \"kind\": \"Solis\", \"source\": filestr, \"created\": created}\n if parent is None:\n@@ -113,7 +134,15 @@\n data = parent.create_data(**kwargs)\n \n axis0 = np.array(axis0)\n- if float(attrs[\"Grating Groove Density (l/mm)\"]) == 0:\n+ try:\n+ groove_density = float(attrs[\"Grating Groove Density (l/mm)\"])\n+ except KeyError: # assume no grating\n+ warnings.warn(\n+ f\"{filepath.name} has no 'Grating Groove Density (1/mm)' field: guessing x axis units.\"\n+ )\n+ groove_density = isinstance(axis0[0], float)\n+\n+ if groove_density == 0:\n xname = \"xindex\"\n xunits = None\n else:\n@@ -135,9 +164,17 @@\n axes = [\"frame\"] + axes\n \n data.transform(*axes)\n- arr /= float(attrs[\"Exposure Time (secs)\"])\n- # signal has units of Hz because time normalized\n- data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n+ try:\n+ exposure_time = float(attrs[\"Exposure Time (secs)\"])\n+ if exposure_time == 0:\n+ raise ZeroDivisionError\n+ arr /= exposure_time\n+ except (KeyError, ZeroDivisionError) as e: # do not normalize\n+ warnings.warn(f\"{filepath.name} camera signal cannot be given as a count rate.\")\n+ data.create_channel(name=\"signal\", values=arr, signed=False)\n+ else:\n+ # signal has units of Hz because time normalized\n+ data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n \n for key, val in attrs.items():\n data.attrs[key] = val\n", "issue": "from_solis: import fails when no metadata\nIt is possible to export asc image from Solis software without metadata (clearly this is not preferred, but it should be handled gracefully). \r\n\r\nfrom_solis assumes \"Data and Time\" field exists in metadata (for attrs timestamp). 
Fall back on file creation date as an alternative. \n", "before_files": [{"content": "\"\"\"Andor.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport pathlib\nimport time\n\nimport numpy as np\n\nfrom ._data import Data\nfrom .. import exceptions as wt_exceptions\nfrom ..kit import _timestamp as timestamp\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_Solis\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from Andor Solis software (ascii exports).\n\n Parameters\n ----------\n filepath : path-like\n Path to file (should be .asc format).\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : string (optional)\n Name to give to the created data object. If None, filename is used.\n Default is None.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data\n New data object.\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n if not \".asc\" in filepath.suffixes:\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \".asc\")\n # parse name\n if not name:\n name = filepath.name.split(\".\")[0]\n # create data\n ds = np.DataSource(None)\n f = ds.open(filestr, \"rt\")\n axis0 = []\n arr = []\n attrs = {}\n\n line0 = f.readline().strip()[:-1]\n line0 = [float(x) for x in line0.split(\",\")] # TODO: robust to space, tab, comma\n axis0.append(line0.pop(0))\n arr.append(line0)\n\n def get_frames(f, arr, axis0):\n axis0_written = False\n while True:\n line = f.readline().strip()[:-1]\n if len(line) == 0:\n break\n else:\n line = [float(x) for x in line.split(\",\")]\n # signature of new frames is restart of axis0\n if not axis0_written and (line[0] == axis0[0]):\n axis0_written = True\n if axis0_written:\n line.pop(0)\n else:\n axis0.append(line.pop(0))\n arr.append(line)\n return arr, axis0\n\n arr, axis0 = get_frames(f, arr, axis0)\n nframes = len(arr) // len(axis0)\n\n i = 0\n while i < 3:\n line = f.readline().strip()\n if len(line) == 0:\n i += 1\n else:\n try:\n key, val = line.split(\":\", 1)\n except ValueError:\n pass\n else:\n attrs[key.strip()] = val.strip()\n\n f.close()\n created = attrs[\"Date and Time\"] # is this UTC?\n created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n\n kwargs = {\"name\": name, \"kind\": \"Solis\", \"source\": filestr, \"created\": created}\n if parent is None:\n data = Data(**kwargs)\n else:\n data = parent.create_data(**kwargs)\n\n axis0 = np.array(axis0)\n if float(attrs[\"Grating Groove Density (l/mm)\"]) == 0:\n xname = \"xindex\"\n xunits = None\n else:\n xname = \"wm\"\n xunits = \"nm\"\n axes = [xname, \"yindex\"]\n\n if nframes == 1:\n arr = np.array(arr)\n data.create_variable(name=xname, values=axis0[:, None], units=xunits)\n data.create_variable(name=\"yindex\", values=np.arange(arr.shape[-1])[None, :], units=None)\n else:\n arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))\n data.create_variable(name=\"frame\", values=np.arange(nframes)[:, None, None], units=None)\n 
data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)\n data.create_variable(\n name=\"yindex\", values=np.arange(arr.shape[-1])[None, None, :], units=None\n )\n axes = [\"frame\"] + axes\n\n data.transform(*axes)\n arr /= float(attrs[\"Exposure Time (secs)\"])\n # signal has units of Hz because time normalized\n data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n\n for key, val in attrs.items():\n data.attrs[key] = val\n\n # finish\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" axes: {0}\".format(data.axis_names))\n print(\" shape: {0}\".format(data.shape))\n return data\n", "path": "WrightTools/data/_solis.py"}], "after_files": [{"content": "\"\"\"Andor.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport pathlib\nimport time\nimport warnings\n\nimport numpy as np\n\nfrom ._data import Data\nfrom .. import exceptions as wt_exceptions\nfrom ..kit import _timestamp as timestamp\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_Solis\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from Andor Solis software (ascii exports).\n\n Parameters\n ----------\n filepath : path-like\n Path to file (should be .asc format).\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : string (optional)\n Name to give to the created data object. If None, filename is used.\n Default is None.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data : WrightTools.Data\n New data object.\n Channels: `signal`. If exposure time is in metadata, signal is given as a count rate (Hz).\n Variables, Axes: `yindex` and `xindex` (no grating) or `wm` (grating)\n\n Notes\n -----\n When exporting as ascii, including metadata is optional.\n It is _strongly recommended_ that you include metadata in exports.\n Metadata informs the image creation date, exposure time, and axes.\n However, if metadata is not present, this importer will make its best guesses to populate these fields accurately.\n\n Saving processed data (e.g. 
vertically-binned data) in Solis software can remove/omit important metadata, so we advise exporting the raw camera images.\n\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n if not \".asc\" in filepath.suffixes:\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \".asc\")\n # parse name\n if not name:\n name = filepath.name.split(\".\")[0]\n # create data\n ds = np.DataSource(None)\n f = ds.open(filestr, \"rt\")\n axis0 = []\n arr = []\n attrs = {}\n\n line0 = f.readline().strip()[:-1]\n line0 = [float(x) for x in line0.split(\",\")] # TODO: robust to space, tab, comma\n axis0.append(line0.pop(0))\n arr.append(line0)\n\n def get_frames(f, arr, axis0):\n axis0_written = False\n while True:\n line = f.readline().strip()[:-1]\n if len(line) == 0:\n break\n else:\n line = [float(x) for x in line.split(\",\")]\n # signature of new frames is restart of axis0\n if not axis0_written and (line[0] == axis0[0]):\n axis0_written = True\n if axis0_written:\n line.pop(0)\n else:\n axis0.append(line.pop(0))\n arr.append(line)\n return arr, axis0\n\n arr, axis0 = get_frames(f, arr, axis0)\n nframes = len(arr) // len(axis0)\n\n i = 0\n while i < 3:\n line = f.readline().strip()\n if len(line) == 0:\n i += 1\n else:\n try:\n key, val = line.split(\":\", 1)\n except ValueError:\n pass\n else:\n attrs[key.strip()] = val.strip()\n\n f.close()\n\n try:\n created = attrs[\"Date and Time\"] # is this UTC?\n created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n except KeyError: # use file creation time\n created = os.stat(filepath).st_mtime\n created = timestamp.TimeStamp(created).RFC3339\n warnings.warn(\n f\"{filepath.name} has no 'Date and Time' field: using file modified time instead: {created}\"\n )\n\n kwargs = {\"name\": name, \"kind\": \"Solis\", \"source\": filestr, \"created\": created}\n if parent is None:\n data = Data(**kwargs)\n else:\n data = parent.create_data(**kwargs)\n\n axis0 = np.array(axis0)\n try:\n groove_density = float(attrs[\"Grating Groove Density (l/mm)\"])\n except KeyError: # assume no grating\n warnings.warn(\n f\"{filepath.name} has no 'Grating Groove Density (1/mm)' field: guessing x axis units.\"\n )\n groove_density = isinstance(axis0[0], float)\n\n if groove_density == 0:\n xname = \"xindex\"\n xunits = None\n else:\n xname = \"wm\"\n xunits = \"nm\"\n axes = [xname, \"yindex\"]\n\n if nframes == 1:\n arr = np.array(arr)\n data.create_variable(name=xname, values=axis0[:, None], units=xunits)\n data.create_variable(name=\"yindex\", values=np.arange(arr.shape[-1])[None, :], units=None)\n else:\n arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))\n data.create_variable(name=\"frame\", values=np.arange(nframes)[:, None, None], units=None)\n data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)\n data.create_variable(\n name=\"yindex\", values=np.arange(arr.shape[-1])[None, None, :], units=None\n )\n axes = [\"frame\"] + axes\n\n data.transform(*axes)\n try:\n exposure_time = float(attrs[\"Exposure Time (secs)\"])\n if exposure_time == 0:\n raise ZeroDivisionError\n arr /= exposure_time\n except (KeyError, ZeroDivisionError) as e: # do not normalize\n warnings.warn(f\"{filepath.name} camera signal cannot be given as a count rate.\")\n data.create_channel(name=\"signal\", values=arr, signed=False)\n else:\n # signal has units of Hz because time normalized\n data.create_channel(name=\"signal\", values=arr, signed=False, 
units=\"Hz\")\n\n for key, val in attrs.items():\n data.attrs[key] = val\n\n # finish\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" axes: {0}\".format(data.axis_names))\n print(\" shape: {0}\".format(data.shape))\n return data\n", "path": "WrightTools/data/_solis.py"}]} | 1,793 | 928 |
gh_patches_debug_42436 | rasdani/github-patches | git_diff | conan-io__conan-center-index-15293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] spix/0.5
### Package Name/Version
spix/0.5
### Changelog
https://github.com/faaxm/spix/releases/tag/v0.5
### Context about the new update
I will push a PR for this version
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/spix/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, replace_in_file
4 from conan.tools.build import check_min_cppstd
5 from conan.tools.scm import Version
6 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
7 import os
8
9
10 required_conan_version = ">=1.52.0"
11
12
13 class SpixConan(ConanFile):
14 name = "spix"
15 description = "UI test automation library for QtQuick/QML Apps"
16 license = "MIT"
17 url = "https://github.com/conan-io/conan-center-index"
18 homepage = "https://github.com/faaxm/spix"
19 topics = ("automation", "qt", "qml", "qt-quick", "qt5", "qtquick", "automated-testing", "qt-qml", "qml-applications")
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "shared": [True, False],
23 "fPIC": [True, False],
24 }
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 }
29
30 @property
31 def _minimum_cpp_standard(self):
32 return 14
33
34 @property
35 def _compilers_minimum_version(self):
36 return {
37 "Visual Studio": "14",
38 "gcc": "5",
39 "clang": "3.4",
40 "apple-clang": "10"
41 }
42
43 def export_sources(self):
44 export_conandata_patches(self)
45
46 def config_options(self):
47 if self.settings.os == "Windows":
48 del self.options.fPIC
49
50 def configure(self):
51 if self.options.shared:
52 try:
53 del self.options.fPIC
54 except Exception:
55 pass
56
57 def layout(self):
58 cmake_layout(self, src_folder="src")
59
60 def requirements(self):
61 self.requires("anyrpc/1.0.2")
62 self.requires("qt/6.3.1")
63 self.requires("expat/2.4.9")
64
65 def validate(self):
66 if self.info.settings.compiler.cppstd:
67 check_min_cppstd(self, self._minimum_cpp_standard)
68 minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
69 if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
70 raise ConanInvalidConfiguration(
71 f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
72 )
73
74 if Version(self.dependencies["qt"].ref.version).major == 6 and not self.options["qt"].qtshadertools:
75 raise ConanInvalidConfiguration(f"{self.ref} requires qt:qtshadertools to get the Quick module")
76 if not (self.options["qt"].gui and self.options["qt"].qtdeclarative):
77 raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
78
79 def source(self):
80 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
81
82 def generate(self):
83 tc = CMakeToolchain(self)
84 tc.variables["SPIX_BUILD_EXAMPLES"] = False
85 tc.variables["SPIX_BUILD_TESTS"] = False
86 tc.variables["SPIX_QT_MAJOR"] = Version(self.dependencies["qt"].ref.version).major
87 tc.generate()
88
89 deps = CMakeDeps(self)
90 deps.generate()
91
92 def _patch_sources(self):
93 apply_conandata_patches(self)
94 if Version(self.deps_cpp_info["qt"].version).major == 6:
95 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
96
97 def build(self):
98 self._patch_sources()
99 cmake = CMake(self)
100 cmake.configure()
101 cmake.build()
102
103 def package(self):
104 copy(self, pattern="LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
105 cmake = CMake(self)
106 cmake.install()
107
108 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
109 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
110 rmdir(self, os.path.join(self.package_folder, "share"))
111 rm(self, "*.la", os.path.join(self.package_folder, "lib"))
112 rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
113 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
114
115 def package_info(self):
116 self.cpp_info.libs = ["Spix"]
117 self.cpp_info.set_property("cmake_file_name", "Spix")
118 self.cpp_info.set_property("cmake_target_name", "Spix::Spix")
119
120 # TODO remove once conan v2 removed cmake_find_package_*
121 self.cpp_info.names["cmake_find_package"] = "Spix"
122 self.cpp_info.names["cmake_find_package_multi"] = "Spix"
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py
--- a/recipes/spix/all/conanfile.py
+++ b/recipes/spix/all/conanfile.py
@@ -7,7 +7,7 @@
import os
-required_conan_version = ">=1.52.0"
+required_conan_version = ">=1.53.0"
class SpixConan(ConanFile):
@@ -29,16 +29,26 @@
@property
def _minimum_cpp_standard(self):
- return 14
+ return 14 if self.version == "0.4" else 17
@property
def _compilers_minimum_version(self):
- return {
- "Visual Studio": "14",
- "gcc": "5",
- "clang": "3.4",
- "apple-clang": "10"
- }
+ if self.version == "0.4":
+ return {
+ "Visual Studio": "14",
+ "msvc": "190",
+ "gcc": "5",
+ "clang": "3.4",
+ "apple-clang": "10"
+ }
+ else:
+ return {
+ "Visual Studio": "15.7",
+ "msvc": "192", # FIXME: 15.7 is actually 1914 but needs to be tested
+ "gcc": "7",
+ "clang": "5",
+ "apple-clang": "10",
+ }
def export_sources(self):
export_conandata_patches(self)
@@ -49,24 +59,20 @@
def configure(self):
if self.options.shared:
- try:
- del self.options.fPIC
- except Exception:
- pass
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("anyrpc/1.0.2")
- self.requires("qt/6.3.1")
- self.requires("expat/2.4.9")
+ self.requires("qt/6.4.2")
def validate(self):
- if self.info.settings.compiler.cppstd:
+ if self.settings.compiler.cppstd:
check_min_cppstd(self, self._minimum_cpp_standard)
- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
)
@@ -77,7 +83,7 @@
raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
def source(self):
- get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
+ get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
@@ -87,11 +93,13 @@
tc.generate()
deps = CMakeDeps(self)
+ deps.set_property("anyrpc", "cmake_file_name", "AnyRPC")
+ deps.set_property("anyrpc", "cmake_target_name", "AnyRPC::anyrpc")
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
- if Version(self.deps_cpp_info["qt"].version).major == 6:
+ if self.version == "0.4" and Version(self.dependencies["qt"].ref.version).major == 6:
replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
def build(self):
| {"golden_diff": "diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py\n--- a/recipes/spix/all/conanfile.py\n+++ b/recipes/spix/all/conanfile.py\n@@ -7,7 +7,7 @@\n import os\n \n \n-required_conan_version = \">=1.52.0\"\n+required_conan_version = \">=1.53.0\"\n \n \n class SpixConan(ConanFile):\n@@ -29,16 +29,26 @@\n \n @property\n def _minimum_cpp_standard(self):\n- return 14\n+ return 14 if self.version == \"0.4\" else 17\n \n @property\n def _compilers_minimum_version(self):\n- return {\n- \"Visual Studio\": \"14\",\n- \"gcc\": \"5\",\n- \"clang\": \"3.4\",\n- \"apple-clang\": \"10\"\n- }\n+ if self.version == \"0.4\":\n+ return {\n+ \"Visual Studio\": \"14\",\n+ \"msvc\": \"190\",\n+ \"gcc\": \"5\",\n+ \"clang\": \"3.4\",\n+ \"apple-clang\": \"10\"\n+ }\n+ else:\n+ return {\n+ \"Visual Studio\": \"15.7\",\n+ \"msvc\": \"192\", # FIXME: 15.7 is actually 1914 but needs to be tested\n+ \"gcc\": \"7\",\n+ \"clang\": \"5\",\n+ \"apple-clang\": \"10\",\n+ }\n \n def export_sources(self):\n export_conandata_patches(self)\n@@ -49,24 +59,20 @@\n \n def configure(self):\n if self.options.shared:\n- try:\n- del self.options.fPIC\n- except Exception:\n- pass\n+ self.options.rm_safe(\"fPIC\")\n \n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n- self.requires(\"qt/6.3.1\")\n- self.requires(\"expat/2.4.9\")\n+ self.requires(\"qt/6.4.2\")\n \n def validate(self):\n- if self.info.settings.compiler.cppstd:\n+ if self.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n@@ -77,7 +83,7 @@\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n \n def source(self):\n- get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n+ get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n \n def generate(self):\n tc = CMakeToolchain(self)\n@@ -87,11 +93,13 @@\n tc.generate()\n \n deps = CMakeDeps(self)\n+ deps.set_property(\"anyrpc\", \"cmake_file_name\", \"AnyRPC\")\n+ deps.set_property(\"anyrpc\", \"cmake_target_name\", \"AnyRPC::anyrpc\")\n deps.generate()\n \n def _patch_sources(self):\n apply_conandata_patches(self)\n- if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n+ if self.version == \"0.4\" and Version(self.dependencies[\"qt\"].ref.version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n \n def build(self):\n", "issue": "[request] spix/0.5\n### Package Name/Version\n\nspix/0.5\n\n### Changelog\n\nhttps://github.com/faaxm/spix/releases/tag/v0.5\n\n### Context about the new update\n\nI will push a PR for this version\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, 
replace_in_file\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.scm import Version\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nimport os\n\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass SpixConan(ConanFile):\n name = \"spix\"\n description = \"UI test automation library for QtQuick/QML Apps\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/faaxm/spix\"\n topics = (\"automation\", \"qt\", \"qml\", \"qt-quick\", \"qt5\", \"qtquick\", \"automated-testing\", \"qt-qml\", \"qml-applications\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_cpp_standard(self):\n return 14\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"14\",\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\"\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n try:\n del self.options.fPIC\n except Exception:\n pass\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n self.requires(\"qt/6.3.1\")\n self.requires(\"expat/2.4.9\")\n \n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n\n if Version(self.dependencies[\"qt\"].ref.version).major == 6 and not self.options[\"qt\"].qtshadertools:\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:qtshadertools to get the Quick module\")\n if not (self.options[\"qt\"].gui and self.options[\"qt\"].qtdeclarative):\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"SPIX_BUILD_EXAMPLES\"] = False\n tc.variables[\"SPIX_BUILD_TESTS\"] = False\n tc.variables[\"SPIX_QT_MAJOR\"] = Version(self.dependencies[\"qt\"].ref.version).major\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE.txt\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n cmake = CMake(self)\n cmake.install()\n\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.la\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"Spix\"]\n self.cpp_info.set_property(\"cmake_file_name\", \"Spix\") \n self.cpp_info.set_property(\"cmake_target_name\", \"Spix::Spix\")\n \n # TODO remove once conan v2 removed cmake_find_package_*\n self.cpp_info.names[\"cmake_find_package\"] = \"Spix\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Spix\"\n", "path": "recipes/spix/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, replace_in_file\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.scm import Version\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass SpixConan(ConanFile):\n name = \"spix\"\n description = \"UI test automation library for QtQuick/QML Apps\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/faaxm/spix\"\n topics = (\"automation\", \"qt\", \"qml\", \"qt-quick\", \"qt5\", \"qtquick\", \"automated-testing\", \"qt-qml\", \"qml-applications\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_cpp_standard(self):\n return 14 if self.version == \"0.4\" else 17\n\n @property\n def _compilers_minimum_version(self):\n if self.version == \"0.4\":\n return {\n \"Visual Studio\": \"14\",\n \"msvc\": \"190\",\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\"\n }\n else:\n return {\n \"Visual Studio\": \"15.7\",\n \"msvc\": \"192\", # FIXME: 15.7 is actually 1914 but needs to be tested\n \"gcc\": \"7\",\n \"clang\": \"5\",\n \"apple-clang\": \"10\",\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n self.requires(\"qt/6.4.2\")\n \n def validate(self):\n if self.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n\n if Version(self.dependencies[\"qt\"].ref.version).major == 6 and not self.options[\"qt\"].qtshadertools:\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:qtshadertools to get the Quick module\")\n if not (self.options[\"qt\"].gui and self.options[\"qt\"].qtdeclarative):\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"SPIX_BUILD_EXAMPLES\"] = False\n 
tc.variables[\"SPIX_BUILD_TESTS\"] = False\n tc.variables[\"SPIX_QT_MAJOR\"] = Version(self.dependencies[\"qt\"].ref.version).major\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.set_property(\"anyrpc\", \"cmake_file_name\", \"AnyRPC\")\n deps.set_property(\"anyrpc\", \"cmake_target_name\", \"AnyRPC::anyrpc\")\n deps.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n if self.version == \"0.4\" and Version(self.dependencies[\"qt\"].ref.version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE.txt\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n cmake = CMake(self)\n cmake.install()\n\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.la\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"Spix\"]\n self.cpp_info.set_property(\"cmake_file_name\", \"Spix\") \n self.cpp_info.set_property(\"cmake_target_name\", \"Spix::Spix\")\n \n # TODO remove once conan v2 removed cmake_find_package_*\n self.cpp_info.names[\"cmake_find_package\"] = \"Spix\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Spix\"\n", "path": "recipes/spix/all/conanfile.py"}]} | 1,715 | 938 |
gh_patches_debug_28026 | rasdani/github-patches | git_diff | bridgecrewio__checkov-961 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys
**Describe the bug**
**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.
**To Reproduce**
The following terraform code will cause a test failure, which appears to be against the spirit of the rule:
```terraform
resource "aws_api_gateway_method" "POST" {
...
authorization = NONE
api_key_required = true
...
}
````
**Expected behavior**
I would expect this configuration to be considered secure.
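A sketch of the predicate the report implies, treating `api_key_required` as a second way to close the method (function name is hypothetical; `conf` values are lists, as checkov parses them):

```python
def is_open_to_public(conf):
    # Open only if authorization is NONE *and* no API key is required.
    return (
        conf["http_method"][0] != "OPTIONS"
        and conf["authorization"][0] == "NONE"
        and not conf.get("api_key_required", [False])[0]
    )
```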
**Desktop (please complete the following information):**
- OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833
- Checkov Version 1.0.833
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
3
4 class APIGatewayAuthorization(BaseResourceCheck):
5
6 def __init__(self):
7 name = "Ensure there is no open access to back-end resources through API"
8 id = "CKV_AWS_59"
9 supported_resources = ['AWS::ApiGateway::Method']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'Properties' in conf.keys():
15 if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
16 if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20 check = APIGatewayAuthorization()
21
```
Path: `checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class APIGatewayAuthorization(BaseResourceCheck):
6
7 def __init__(self):
8 name = "Ensure there is no open access to back-end resources through API"
9 id = "CKV_AWS_59"
10 supported_resources = ['aws_api_gateway_method']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 self.evaluated_keys = ['http_method', 'authorization']
16 if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20
21 check = APIGatewayAuthorization()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
@@ -14,7 +14,8 @@
if 'Properties' in conf.keys():
if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
- return CheckResult.FAILED
+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:
+ return CheckResult.FAILED
return CheckResult.PASSED
check = APIGatewayAuthorization()
diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
@@ -12,8 +12,8 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
- self.evaluated_keys = ['http_method', 'authorization']
- if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']
+ if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):
return CheckResult.FAILED
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n@@ -14,7 +14,8 @@\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n- return CheckResult.FAILED\n+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n \n check = APIGatewayAuthorization()\ndiff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n@@ -12,8 +12,8 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n- self.evaluated_keys = ['http_method', 'authorization']\n- if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']\n+ if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys\n**Describe the bug**\r\n**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.\r\n\r\n**To Reproduce**\r\nThe following terraform code will cause a test failure, which appears to be against the spirit of the rule:\r\n```terraform\r\nresource \"aws_api_gateway_method\" \"POST\" {\r\n...\r\nauthorization = NONE\r\napi_key_required = true\r\n...\r\n}\r\n````\r\n\r\n**Expected behavior**\r\nI would expect this configuration to be considered secure. 
\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833\r\n - Checkov Version 1.0.833\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['AWS::ApiGateway::Method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['aws_api_gateway_method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = ['http_method', 'authorization']\n if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['AWS::ApiGateway::Method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end 
resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['aws_api_gateway_method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']\n if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py"}]} | 964 | 412 |
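For illustration, a minimal Python sketch of how the patched check behaves once `api_key_required` is taken into account. The two sample configs are hypothetical, not taken from the dataset; `scan` is a simplified stand-in for the patched `scan_resource_conf`:

```python
# Simplified stand-in for the patched Terraform check logic (CKV_AWS_59).
def scan(conf):
    if (conf["http_method"][0] != "OPTIONS"
            and conf["authorization"][0] == "NONE"
            and ("api_key_required" not in conf
                 or conf["api_key_required"][0] == False)):
        return "FAILED"
    return "PASSED"

open_method = {"http_method": ["POST"], "authorization": ["NONE"]}
keyed_method = {"http_method": ["POST"], "authorization": ["NONE"],
                "api_key_required": [True]}

print(scan(open_method))   # FAILED -- no authorizer and no API key
print(scan(keyed_method))  # PASSED -- an API key now satisfies the check
```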
gh_patches_debug_6386 | rasdani/github-patches | git_diff | huggingface__huggingface_hub-757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
chore: Updated the pillow version specifier
Hello there :wave:
Following up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the "tests" extra build!
Any feedback is welcome!
cc @osanseviero @Narsil
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api-inference-community/setup.py`
Content:
```
1 from setuptools import setup
2
3
4 setup(
5 name="api_inference_community",
6 version="0.0.21",
7 description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
8 url="http://github.com/huggingface/api-inference-community",
9 author="Nicolas Patry",
10 author_email="[email protected]",
11 license="MIT",
12 packages=["api_inference_community"],
13 python_requires=">=3.6.0",
14 zip_safe=False,
15 install_requires=list(line for line in open("requirements.txt", "r")),
16 extras_require={
17 "test": [
18 "httpx>=0.18",
19 "Pillow>=8.2",
20 "httpx>=0.18",
21 "torch>=1.9.0",
22 "pytest>=6.2",
23 ]
24 },
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py
--- a/api-inference-community/setup.py
+++ b/api-inference-community/setup.py
@@ -3,7 +3,7 @@
setup(
name="api_inference_community",
- version="0.0.21",
+ version="0.0.23",
description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
url="http://github.com/huggingface/api-inference-community",
author="Nicolas Patry",
| {"golden_diff": "diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py\n--- a/api-inference-community/setup.py\n+++ b/api-inference-community/setup.py\n@@ -3,7 +3,7 @@\n \n setup(\n name=\"api_inference_community\",\n- version=\"0.0.21\",\n+ version=\"0.0.23\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n", "issue": "chore: Updated the pillow version specifier\nHello there :wave: \r\n\r\nFollowing up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the \"tests\" extra build!\r\n\r\nAny feedback is welcome!\r\n\r\ncc @osanseviero @Narsil \n", "before_files": [{"content": "from setuptools import setup\n\n\nsetup(\n name=\"api_inference_community\",\n version=\"0.0.21\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=[\"api_inference_community\"],\n python_requires=\">=3.6.0\",\n zip_safe=False,\n install_requires=list(line for line in open(\"requirements.txt\", \"r\")),\n extras_require={\n \"test\": [\n \"httpx>=0.18\",\n \"Pillow>=8.2\",\n \"httpx>=0.18\",\n \"torch>=1.9.0\",\n \"pytest>=6.2\",\n ]\n },\n)\n", "path": "api-inference-community/setup.py"}], "after_files": [{"content": "from setuptools import setup\n\n\nsetup(\n name=\"api_inference_community\",\n version=\"0.0.23\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=[\"api_inference_community\"],\n python_requires=\">=3.6.0\",\n zip_safe=False,\n install_requires=list(line for line in open(\"requirements.txt\", \"r\")),\n extras_require={\n \"test\": [\n \"httpx>=0.18\",\n \"Pillow>=8.2\",\n \"httpx>=0.18\",\n \"torch>=1.9.0\",\n \"pytest>=6.2\",\n ]\n },\n)\n", "path": "api-inference-community/setup.py"}]} | 574 | 130 |
gh_patches_debug_23307 | rasdani/github-patches | git_diff | pypa__virtualenv-1730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken activation in Windows for python3
virtualenv activation through `activate_this.py` is broken in Windows for python3. Check this very simple piece of code
```
import os
import subprocess
import sys
from distutils.spawn import find_executable
venv = find_executable("virtualenv")
testdir = os.path.join(os.path.curdir, 'testenv')
subprocess.check_output((venv, testdir, "-p", sys.executable))
bin_path = os.path.join(testdir, "Scripts") if sys.platform in ("win32", "cygwin") else os.path.join(testdir, "bin")
path = os.path.join(bin_path, "activate_this.py")
with open(path) as f:
exec(f.read(), {"__file__": path})
```
This generates a `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:
```
for lib in "..\Lib\site-packages".split(os.pathsep):
path = os.path.realpath(os.path.join(bin_dir, lib))
site.addsitedir(path.decode("utf-8") if "yes" else path)
```
it's indeed normal that we get the error, because we're always calling `decode()` on a str. The question is: where does the `__DECODE_PATH__` from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 get assigned the value `"yes"`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/activation/python/__init__.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 from collections import OrderedDict
5
6 from virtualenv.info import WIN_CPYTHON_2
7 from virtualenv.util.path import Path
8 from virtualenv.util.six import ensure_text
9
10 from ..via_template import ViaTemplateActivator
11
12
13 class PythonActivator(ViaTemplateActivator):
14 def templates(self):
15 yield Path("activate_this.py")
16
17 def replacements(self, creator, dest_folder):
18 replacements = super(PythonActivator, self).replacements(creator, dest_folder)
19 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
20 replacements.update(
21 {
22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
23 "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
24 }
25 )
26 return replacements
27
28 @staticmethod
29 def _repr_unicode(creator, value):
30 py2 = creator.interpreter.version_info.major == 2
31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
32 value = ensure_text(repr(value.encode("utf-8"))[1:-1])
33 return value
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
--- a/src/virtualenv/activation/python/__init__.py
+++ b/src/virtualenv/activation/python/__init__.py
@@ -3,7 +3,6 @@
import os
from collections import OrderedDict
-from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_text
@@ -17,10 +16,11 @@
def replacements(self, creator, dest_folder):
replacements = super(PythonActivator, self).replacements(creator, dest_folder)
lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
+ win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2
replacements.update(
{
"__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
- "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
+ "__DECODE_PATH__": ("yes" if win_py2 else ""),
}
)
return replacements
| {"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -3,7 +3,6 @@\n import os\n from collections import OrderedDict\n \n-from virtualenv.info import WIN_CPYTHON_2\n from virtualenv.util.path import Path\n from virtualenv.util.six import ensure_text\n \n@@ -17,10 +16,11 @@\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n+ win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n- \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n+ \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n", "issue": "Broken activation in Windows for python3\nvirtualenv activation through `activate_this.py` is broken in Windows for python3. Check this very simple piece of code\r\n\r\n```\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom distutils.spawn import find_executable\r\n\r\nvenv = find_executable(\"virtualenv\")\r\ntestdir = os.path.join(os.path.curdir, 'testenv')\r\n\r\nsubprocess.check_output((venv, testdir, \"-p\", sys.executable))\r\n\r\nbin_path = os.path.join(testdir, \"Scripts\") if sys.platform in (\"win32\", \"cygwin\") else os.path.join(testdir, \"bin\")\r\n\r\npath = os.path.join(bin_path, \"activate_this.py\")\r\nwith open(path) as f:\r\n exec(f.read(), {\"__file__\": path})\r\n```\r\n\r\nThis generates a `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:\r\n\r\n```\r\nfor lib in \"..\\Lib\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"yes\" else path)\r\n```\r\n\r\nit's indeed normal that we get the error because we're always calling `decode()` over a str. 
Question is where the `__DECODE_PATH__` from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 is assigned to `\"yes\"`?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.info import WIN_CPYTHON_2\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}]} | 911 | 266 |
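As the golden diff shows, the placeholder was computed from the interpreter running virtualenv (`WIN_CPYTHON_2`) rather than from the interpreter of the environment being created; the patch derives it from `creator.interpreter` instead. Whenever the literal `"yes"` gets baked in, the `decode()` branch always runs, because any non-empty string is truthy. A minimal sketch of that truthiness trap, with placeholder paths:

```python
path = "C:\\testenv\\Lib\\site-packages"  # already a str on Python 3

rendered = "yes"  # what the broken substitution bakes into activate_this.py
fixed = ""        # what the patch renders for anything but Windows CPython 2

# path.decode("utf-8") if rendered else path  -> AttributeError on Python 3
print(path.decode("utf-8") if fixed else path)  # falsy "" -> plain str path
```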
gh_patches_debug_25720 | rasdani/github-patches | git_diff | cal-itp__benefits-1343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configure a Sentry denylist
Looks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/
Another more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/
_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/sentry.py`
Content:
```
1 from benefits import VERSION
2 import sentry_sdk
3 from sentry_sdk.integrations.django import DjangoIntegration
4 import shutil
5 import os
6 import subprocess
7
8
9 SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
10
11
12 def git_available():
13 return bool(shutil.which("git"))
14
15
16 # https://stackoverflow.com/a/24584384/358804
17 def is_git_directory(path="."):
18 dev_null = open(os.devnull, "w")
19 return subprocess.call(["git", "-C", path, "status"], stderr=dev_null, stdout=dev_null) == 0
20
21
22 # https://stackoverflow.com/a/21901260/358804
23 def get_git_revision_hash():
24 return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
25
26
27 def get_sha_file_path():
28 current_file = os.path.dirname(os.path.abspath(__file__))
29 return os.path.join(current_file, "..", "static", "sha.txt")
30
31
32 def get_sha_from_file():
33 sha_path = get_sha_file_path()
34 if os.path.isfile(sha_path):
35 with open(sha_path) as f:
36 return f.read().strip()
37 else:
38 return None
39
40
41 def get_release() -> str:
42 """Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION."""
43
44 if git_available() and is_git_directory():
45 return get_git_revision_hash()
46 else:
47 sha = get_sha_from_file()
48 if sha:
49 return sha
50 else:
51 # one of the above *should* always be available, but including this just in case
52 return VERSION
53
54
55 def configure():
56 SENTRY_DSN = os.environ.get("SENTRY_DSN")
57 if SENTRY_DSN:
58 release = get_release()
59 print(f"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...")
60
61 # https://docs.sentry.io/platforms/python/configuration/
62 sentry_sdk.init(
63 dsn=SENTRY_DSN,
64 integrations=[
65 DjangoIntegration(),
66 ],
67 traces_sample_rate=1.0,
68 environment=SENTRY_ENVIRONMENT,
69 release=release,
70 in_app_include=["benefits"],
71 )
72 else:
73 print("SENTRY_DSN not set, so won't send events")
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/sentry.py b/benefits/sentry.py
--- a/benefits/sentry.py
+++ b/benefits/sentry.py
@@ -1,10 +1,13 @@
-from benefits import VERSION
-import sentry_sdk
-from sentry_sdk.integrations.django import DjangoIntegration
import shutil
import os
import subprocess
+import sentry_sdk
+from sentry_sdk.integrations.django import DjangoIntegration
+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST
+
+from benefits import VERSION
+
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
@@ -52,6 +55,12 @@
return VERSION
+def get_denylist():
+ # custom denylist
+ denylist = DEFAULT_DENYLIST + ["sub", "name"]
+ return denylist
+
+
def configure():
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
@@ -68,6 +77,10 @@
environment=SENTRY_ENVIRONMENT,
release=release,
in_app_include=["benefits"],
+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist
+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber
+ send_default_pii=False,
+ event_scrubber=EventScrubber(denylist=get_denylist()),
)
else:
print("SENTRY_DSN not set, so won't send events")
| {"golden_diff": "diff --git a/benefits/sentry.py b/benefits/sentry.py\n--- a/benefits/sentry.py\n+++ b/benefits/sentry.py\n@@ -1,10 +1,13 @@\n-from benefits import VERSION\n-import sentry_sdk\n-from sentry_sdk.integrations.django import DjangoIntegration\n import shutil\n import os\n import subprocess\n \n+import sentry_sdk\n+from sentry_sdk.integrations.django import DjangoIntegration\n+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST\n+\n+from benefits import VERSION\n+\n \n SENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n \n@@ -52,6 +55,12 @@\n return VERSION\n \n \n+def get_denylist():\n+ # custom denylist\n+ denylist = DEFAULT_DENYLIST + [\"sub\", \"name\"]\n+ return denylist\n+\n+\n def configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n@@ -68,6 +77,10 @@\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist\n+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber\n+ send_default_pii=False,\n+ event_scrubber=EventScrubber(denylist=get_denylist()),\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "issue": "Configure a Sentry denylist\nLooks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/\r\n\r\nAnother more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/\r\n\r\n_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_\r\n \n", "before_files": [{"content": "from benefits import VERSION\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nimport shutil\nimport os\nimport subprocess\n\n\nSENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n\n\ndef git_available():\n return bool(shutil.which(\"git\"))\n\n\n# https://stackoverflow.com/a/24584384/358804\ndef is_git_directory(path=\".\"):\n dev_null = open(os.devnull, \"w\")\n return subprocess.call([\"git\", \"-C\", path, \"status\"], stderr=dev_null, stdout=dev_null) == 0\n\n\n# https://stackoverflow.com/a/21901260/358804\ndef get_git_revision_hash():\n return subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).decode(\"ascii\").strip()\n\n\ndef get_sha_file_path():\n current_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_file, \"..\", \"static\", \"sha.txt\")\n\n\ndef get_sha_from_file():\n sha_path = get_sha_file_path()\n if os.path.isfile(sha_path):\n with open(sha_path) as f:\n return f.read().strip()\n else:\n return None\n\n\ndef get_release() -> str:\n \"\"\"Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION.\"\"\"\n\n if git_available() and is_git_directory():\n return get_git_revision_hash()\n else:\n sha = get_sha_from_file()\n if sha:\n return sha\n else:\n # one of the above *should* always be available, but including this just in case\n return VERSION\n\n\ndef configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n release = get_release()\n print(f\"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...\")\n\n # https://docs.sentry.io/platforms/python/configuration/\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n DjangoIntegration(),\n ],\n 
traces_sample_rate=1.0,\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "path": "benefits/sentry.py"}], "after_files": [{"content": "import shutil\nimport os\nimport subprocess\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST\n\nfrom benefits import VERSION\n\n\nSENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n\n\ndef git_available():\n return bool(shutil.which(\"git\"))\n\n\n# https://stackoverflow.com/a/24584384/358804\ndef is_git_directory(path=\".\"):\n dev_null = open(os.devnull, \"w\")\n return subprocess.call([\"git\", \"-C\", path, \"status\"], stderr=dev_null, stdout=dev_null) == 0\n\n\n# https://stackoverflow.com/a/21901260/358804\ndef get_git_revision_hash():\n return subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).decode(\"ascii\").strip()\n\n\ndef get_sha_file_path():\n current_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_file, \"..\", \"static\", \"sha.txt\")\n\n\ndef get_sha_from_file():\n sha_path = get_sha_file_path()\n if os.path.isfile(sha_path):\n with open(sha_path) as f:\n return f.read().strip()\n else:\n return None\n\n\ndef get_release() -> str:\n \"\"\"Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION.\"\"\"\n\n if git_available() and is_git_directory():\n return get_git_revision_hash()\n else:\n sha = get_sha_from_file()\n if sha:\n return sha\n else:\n # one of the above *should* always be available, but including this just in case\n return VERSION\n\n\ndef get_denylist():\n # custom denylist\n denylist = DEFAULT_DENYLIST + [\"sub\", \"name\"]\n return denylist\n\n\ndef configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n release = get_release()\n print(f\"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...\")\n\n # https://docs.sentry.io/platforms/python/configuration/\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n DjangoIntegration(),\n ],\n traces_sample_rate=1.0,\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n # send_default_pii must be False (the default) for a custom EventScrubber/denylist\n # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber\n send_default_pii=False,\n event_scrubber=EventScrubber(denylist=get_denylist()),\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "path": "benefits/sentry.py"}]} | 1,033 | 355 |
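For reference, the scrubber wiring introduced by the patch can be exercised in isolation. A sketch; the DSN is a placeholder:

```python
import sentry_sdk
from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST

denylist = DEFAULT_DENYLIST + ["sub", "name"]  # extra keys from the patch

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    # send_default_pii must stay False (the default) for a custom
    # EventScrubber/denylist to take effect.
    send_default_pii=False,
    event_scrubber=EventScrubber(denylist=denylist),
)
```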
gh_patches_debug_14180 | rasdani/github-patches | git_diff | pre-commit__pre-commit-622 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unstaged files + never ran pre-commit => "No such file or directory: .../.cache/pre-commit/patch..."
```
$ pre-commit run
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.
An unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
Check the log at /home/asottile/.cache/pre-commit/pre-commit.log
```
Stacktrace:
```python
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 44, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 231, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 249, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py", line 46, in staged_files_only
with io.open(patch_filename, 'wb') as patch_file:
IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/staged_files_only.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import time
8
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11
12
13 logger = logging.getLogger('pre_commit')
14
15
16 def _git_apply(patch):
17 args = ('apply', '--whitespace=nowarn', patch)
18 try:
19 cmd_output('git', *args, encoding=None)
20 except CalledProcessError:
21 # Retry with autocrlf=false -- see #570
22 cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)
23
24
25 @contextlib.contextmanager
26 def staged_files_only(patch_dir):
27 """Clear any unstaged changes from the git working directory inside this
28 context.
29 """
30 # Determine if there are unstaged files
31 tree = cmd_output('git', 'write-tree')[1].strip()
32 retcode, diff_stdout_binary, _ = cmd_output(
33 'git', 'diff-index', '--ignore-submodules', '--binary',
34 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
35 retcode=None,
36 encoding=None,
37 )
38 if retcode and diff_stdout_binary.strip():
39 patch_filename = 'patch{}'.format(int(time.time()))
40 patch_filename = os.path.join(patch_dir, patch_filename)
41 logger.warning('Unstaged files detected.')
42 logger.info(
43 'Stashing unstaged files to {}.'.format(patch_filename),
44 )
45 # Save the current unstaged changes as a patch
46 with io.open(patch_filename, 'wb') as patch_file:
47 patch_file.write(diff_stdout_binary)
48
49 # Clear the working directory of unstaged changes
50 cmd_output('git', 'checkout', '--', '.')
51 try:
52 yield
53 finally:
54 # Try to apply the patch we saved
55 try:
56 _git_apply(patch_filename)
57 except CalledProcessError:
58 logger.warning(
59 'Stashed changes conflicted with hook auto-fixes... '
60 'Rolling back fixes...',
61 )
62 # We failed to apply the patch, presumably due to fixes made
63 # by hooks.
64 # Roll back the changes made by hooks.
65 cmd_output('git', 'checkout', '--', '.')
66 _git_apply(patch_filename)
67 logger.info('Restored changes from {}.'.format(patch_filename))
68 else:
69 # There weren't any staged files so we don't need to do anything
70 # special
71 yield
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -8,6 +8,7 @@
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
+from pre_commit.util import mkdirp
logger = logging.getLogger('pre_commit')
@@ -43,6 +44,7 @@
'Stashing unstaged files to {}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
+ mkdirp(patch_dir)
with io.open(patch_filename, 'wb') as patch_file:
patch_file.write(diff_stdout_binary)
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -8,6 +8,7 @@\n \n from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n+from pre_commit.util import mkdirp\n \n \n logger = logging.getLogger('pre_commit')\n@@ -43,6 +44,7 @@\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n+ mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n", "issue": "Unstaged files + never ran pre-commit => \"No such file or directory: .../.cache/pre-commit/patch...\"\n```\r\n$ pre-commit run\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.\r\nAn unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\nCheck the log at /home/asottile/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\nStacktrace:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/error_handler.py\", line 44, in error_handler\r\n yield\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/main.py\", line 231, in main\r\n return run(runner, args)\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/commands/run.py\", line 249, in run\r\n with ctx:\r\n File \"/usr/lib/python2.7/contextlib.py\", line 17, in __enter__\r\n return self.gen.next()\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py\", line 46, in staged_files_only\r\n with io.open(patch_filename, 'wb') as patch_file:\r\nIOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed 
changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import mkdirp\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,275 | 155 |
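The root cause is that `io.open()` never creates missing parent directories, so on a machine where pre-commit has never run, the cache directory does not exist yet; the patch calls `pre_commit.util.mkdirp` first. A standalone sketch of that pattern, with a placeholder cache path:

```python
import errno
import io
import os


def mkdirp(path):  # same idea as pre_commit.util.mkdirp
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


patch_dir = "/tmp/example-pre-commit-cache"
mkdirp(patch_dir)  # without this, open() below raises ENOENT on a fresh dir
with io.open(os.path.join(patch_dir, "patch123"), "wb") as patch_file:
    patch_file.write(b"diff --git a/x b/x\n")
```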
gh_patches_debug_36746 | rasdani/github-patches | git_diff | DataDog__dd-agent-2965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support ECDSA for ssh_check
ssh_check.py does not support ECDSA SSH keys.
paramiko does support ECDSA SSH keys:
http://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey
I changed ssh_check.py, but it's not working.
```
2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 746, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/agent/checks.d/ssh_check.py", line 70, in check
password=conf.password, pkey=private_key)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 307, in connect
look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 519, in _auth
raise saved_exception
AuthenticationException: Authentication failed.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks.d/ssh_check.py`
Content:
```
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 from collections import namedtuple
7 import time
8
9 # 3p
10 import paramiko
11
12 # project
13 from checks import AgentCheck
14
15
16 class CheckSSH(AgentCheck):
17
18 OPTIONS = [
19 ('host', True, None, str),
20 ('port', False, 22, int),
21 ('username', True, None, str),
22 ('password', False, None, str),
23 ('private_key_file', False, None, str),
24 ('sftp_check', False, True, bool),
25 ('add_missing_keys', False, False, bool),
26 ]
27
28 Config = namedtuple('Config', [
29 'host',
30 'port',
31 'username',
32 'password',
33 'private_key_file',
34 'sftp_check',
35 'add_missing_keys',
36 ])
37
38 def _load_conf(self, instance):
39 params = []
40 for option, required, default, expected_type in self.OPTIONS:
41 value = instance.get(option)
42 if required and (not value or type(value)) != expected_type :
43 raise Exception("Please specify a valid {0}".format(option))
44
45 if value is None or type(value) != expected_type:
46 self.log.debug("Bad or missing value for {0} parameter. Using default".format(option))
47 value = default
48
49 params.append(value)
50 return self.Config._make(params)
51
52 def check(self, instance):
53 conf = self._load_conf(instance)
54 tags = ["instance:{0}-{1}".format(conf.host, conf.port)]
55
56 private_key = None
57 try:
58 private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
59 except IOError:
60 self.warning("Unable to find private key file: {}".format(conf.private_key_file))
61 except paramiko.ssh_exception.PasswordRequiredException:
62 self.warning("Private key file is encrypted but no password was given")
63 except paramiko.ssh_exception.SSHException:
64 self.warning("Private key file is invalid")
65
66 client = paramiko.SSHClient()
67 if conf.add_missing_keys:
68 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
69 client.load_system_host_keys()
70
71 exception_message = None
72 #Service Availability to check status of SSH
73 try:
74 client.connect(conf.host, port=conf.port, username=conf.username,
75 password=conf.password, pkey=private_key)
76 self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
77 message=exception_message)
78
79 except Exception as e:
80 exception_message = str(e)
81 status = AgentCheck.CRITICAL
82 self.service_check('ssh.can_connect', status, tags=tags,
83 message=exception_message)
84 if conf.sftp_check:
85 self.service_check('sftp.can_connect', status, tags=tags,
86 message=exception_message)
87 raise
88
89 #Service Availability to check status of SFTP
90 if conf.sftp_check:
91 try:
92 sftp = client.open_sftp()
93 #Check response time of SFTP
94 start_time = time.time()
95 sftp.listdir('.')
96 status = AgentCheck.OK
97 end_time = time.time()
98 time_taken = end_time - start_time
99 self.gauge('sftp.response_time', time_taken, tags=tags)
100
101 except Exception as e:
102 exception_message = str(e)
103 status = AgentCheck.CRITICAL
104
105 if exception_message is None:
106 exception_message = "No errors occured"
107
108 self.service_check('sftp.can_connect', status, tags=tags,
109 message=exception_message)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py
--- a/checks.d/ssh_check.py
+++ b/checks.d/ssh_check.py
@@ -21,6 +21,7 @@
('username', True, None, str),
('password', False, None, str),
('private_key_file', False, None, str),
+ ('private_key_type', False, 'rsa', str),
('sftp_check', False, True, bool),
('add_missing_keys', False, False, bool),
]
@@ -31,6 +32,7 @@
'username',
'password',
'private_key_file',
+ 'private_key_type',
'sftp_check',
'add_missing_keys',
])
@@ -55,7 +57,10 @@
private_key = None
try:
- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
+ if conf.private_key_type == 'ecdsa':
+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)
+ else:
+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
except IOError:
self.warning("Unable to find private key file: {}".format(conf.private_key_file))
except paramiko.ssh_exception.PasswordRequiredException:
@@ -69,11 +74,11 @@
client.load_system_host_keys()
exception_message = None
- #Service Availability to check status of SSH
+ # Service Availability to check status of SSH
try:
client.connect(conf.host, port=conf.port, username=conf.username,
password=conf.password, pkey=private_key)
- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
message=exception_message)
except Exception as e:
@@ -86,7 +91,7 @@
message=exception_message)
raise
- #Service Availability to check status of SFTP
+ # Service Availability to check status of SFTP
if conf.sftp_check:
try:
sftp = client.open_sftp()
| {"golden_diff": "diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py\n--- a/checks.d/ssh_check.py\n+++ b/checks.d/ssh_check.py\n@@ -21,6 +21,7 @@\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n+ ('private_key_type', False, 'rsa', str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n@@ -31,6 +32,7 @@\n 'username',\n 'password',\n 'private_key_file',\n+ 'private_key_type',\n 'sftp_check',\n 'add_missing_keys',\n ])\n@@ -55,7 +57,10 @@\n \n private_key = None\n try:\n- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n+ if conf.private_key_type == 'ecdsa':\n+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)\n+ else:\n+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n@@ -69,11 +74,11 @@\n client.load_system_host_keys()\n \n exception_message = None\n- #Service Availability to check status of SSH\n+ # Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n \n except Exception as e:\n@@ -86,7 +91,7 @@\n message=exception_message)\n raise\n \n- #Service Availability to check status of SFTP\n+ # Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n", "issue": "Support ECDSA for ssh_check\nssh_check.py is not support ECDSA ssh key.\nparamiko is support ECDSA ssh key.\nhttp://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey\n\nI changes ssh_key.py, but It's not working.\n\n```\n2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed\nTraceback (most recent call last):\n File \"/opt/datadog-agent/agent/checks/__init__.py\", line 746, in run\n self.check(copy.deepcopy(instance))\n File \"/opt/datadog-agent/agent/checks.d/ssh_check.py\", line 70, in check\n password=conf.password, pkey=private_key)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 307, in connect\n look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 519, in _auth\n raise saved_exception\nAuthenticationException: Authentication failed.\n```\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom collections import namedtuple\nimport time\n\n# 3p\nimport paramiko\n\n# project\nfrom checks import AgentCheck\n\n\nclass CheckSSH(AgentCheck):\n\n OPTIONS = [\n ('host', True, None, str),\n ('port', False, 22, int),\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n\n Config = namedtuple('Config', [\n 'host',\n 'port',\n 'username',\n 'password',\n 'private_key_file',\n 'sftp_check',\n 'add_missing_keys',\n ])\n\n def _load_conf(self, instance):\n params = []\n for option, required, default, expected_type in self.OPTIONS:\n value = instance.get(option)\n if required and (not value or type(value)) != expected_type :\n raise Exception(\"Please specify a valid {0}\".format(option))\n\n if value is None or type(value) != expected_type:\n self.log.debug(\"Bad or missing value for {0} parameter. Using default\".format(option))\n value = default\n\n params.append(value)\n return self.Config._make(params)\n\n def check(self, instance):\n conf = self._load_conf(instance)\n tags = [\"instance:{0}-{1}\".format(conf.host, conf.port)]\n\n private_key = None\n try:\n private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n self.warning(\"Private key file is encrypted but no password was given\")\n except paramiko.ssh_exception.SSHException:\n self.warning(\"Private key file is invalid\")\n\n client = paramiko.SSHClient()\n if conf.add_missing_keys:\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.load_system_host_keys()\n\n exception_message = None\n #Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n self.service_check('ssh.can_connect', status, tags=tags,\n message=exception_message)\n if conf.sftp_check:\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n raise\n\n #Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n #Check response time of SFTP\n start_time = time.time()\n sftp.listdir('.')\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n self.gauge('sftp.response_time', time_taken, tags=tags)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n\n if exception_message is None:\n exception_message = \"No errors occured\"\n\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n", "path": "checks.d/ssh_check.py"}], "after_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom collections import namedtuple\nimport time\n\n# 3p\nimport paramiko\n\n# project\nfrom checks import AgentCheck\n\n\nclass CheckSSH(AgentCheck):\n\n OPTIONS = [\n ('host', True, None, str),\n ('port', False, 22, int),\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n ('private_key_type', False, 'rsa', str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n\n Config = namedtuple('Config', [\n 'host',\n 'port',\n 'username',\n 'password',\n 'private_key_file',\n 'private_key_type',\n 'sftp_check',\n 'add_missing_keys',\n ])\n\n def _load_conf(self, instance):\n params = []\n for option, required, default, expected_type in self.OPTIONS:\n value = instance.get(option)\n if required and (not value or type(value)) != expected_type :\n raise Exception(\"Please specify a valid {0}\".format(option))\n\n if value is None or type(value) != expected_type:\n self.log.debug(\"Bad or missing value for {0} parameter. Using default\".format(option))\n value = default\n\n params.append(value)\n return self.Config._make(params)\n\n def check(self, instance):\n conf = self._load_conf(instance)\n tags = [\"instance:{0}-{1}\".format(conf.host, conf.port)]\n\n private_key = None\n try:\n if conf.private_key_type == 'ecdsa':\n private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)\n else:\n private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n self.warning(\"Private key file is encrypted but no password was given\")\n except paramiko.ssh_exception.SSHException:\n self.warning(\"Private key file is invalid\")\n\n client = paramiko.SSHClient()\n if conf.add_missing_keys:\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.load_system_host_keys()\n\n exception_message = None\n # Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n self.service_check('ssh.can_connect', status, tags=tags,\n message=exception_message)\n if conf.sftp_check:\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n raise\n\n # Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n #Check response time of SFTP\n start_time = time.time()\n sftp.listdir('.')\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n self.gauge('sftp.response_time', time_taken, tags=tags)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n\n if exception_message is None:\n exception_message = \"No errors occured\"\n\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n", "path": "checks.d/ssh_check.py"}]} | 1,581 | 496 |
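The patch hard-codes two key types; the same dispatch generalizes naturally to a lookup table. A sketch, where the key path in the trailing comment is a placeholder:

```python
import paramiko

KEY_CLASSES = {"rsa": paramiko.RSAKey, "ecdsa": paramiko.ECDSAKey}


def load_private_key(path, key_type="rsa"):
    # Fall back to RSA when the configured type is unknown, matching the
    # patch's default of 'rsa' for private_key_type.
    key_cls = KEY_CLASSES.get(key_type, paramiko.RSAKey)
    return key_cls.from_private_key_file(path)


# load_private_key("~/.ssh/id_ecdsa", key_type="ecdsa")
```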
gh_patches_debug_2494 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ProjectContainer active projects count broken
https://mein.berlin.de/projects/stadtforum-berlin-wohnen/
shows `7 of 4` active projects.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projectcontainers/models.py`
Content:
```
1 from django.db import models
2 from django.utils import timezone
3 from django.utils.translation import ugettext_lazy as _
4
5 from adhocracy4.projects import models as project_models
6
7
8 class ProjectContainer(project_models.Project):
9 projects = models.ManyToManyField(
10 project_models.Project,
11 related_name='containers',
12 verbose_name=_('Projects')
13 )
14
15 @property
16 def not_archived_projects(self):
17 return self.projects.filter(is_archived=False)
18
19 @property
20 def active_projects(self):
21 now = timezone.now()
22 return self.projects.filter(
23 module__phase__start_date__lte=now,
24 module__phase__end_date__gt=now)
25
26 @property
27 def phases(self):
28 from adhocracy4.phases import models as phase_models
29 return phase_models.Phase.objects\
30 .filter(module__project__containers__id=self.id)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py
--- a/meinberlin/apps/projectcontainers/models.py
+++ b/meinberlin/apps/projectcontainers/models.py
@@ -21,7 +21,7 @@
now = timezone.now()
return self.projects.filter(
module__phase__start_date__lte=now,
- module__phase__end_date__gt=now)
+ module__phase__end_date__gt=now).distinct()
@property
def phases(self):
| {"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py\n--- a/meinberlin/apps/projectcontainers/models.py\n+++ b/meinberlin/apps/projectcontainers/models.py\n@@ -21,7 +21,7 @@\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n- module__phase__end_date__gt=now)\n+ module__phase__end_date__gt=now).distinct()\n \n @property\n def phases(self):\n", "issue": "Projectcontainer active projects count broken\nhttps://mein.berlin.de/projects/stadtforum-berlin-wohnen/\r\n\r\nshows `7 of 4` active projects.\n", "before_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models as project_models\n\n\nclass ProjectContainer(project_models.Project):\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='containers',\n verbose_name=_('Projects')\n )\n\n @property\n def not_archived_projects(self):\n return self.projects.filter(is_archived=False)\n\n @property\n def active_projects(self):\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n module__phase__end_date__gt=now)\n\n @property\n def phases(self):\n from adhocracy4.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project__containers__id=self.id)\n", "path": "meinberlin/apps/projectcontainers/models.py"}], "after_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models as project_models\n\n\nclass ProjectContainer(project_models.Project):\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='containers',\n verbose_name=_('Projects')\n )\n\n @property\n def not_archived_projects(self):\n return self.projects.filter(is_archived=False)\n\n @property\n def active_projects(self):\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n module__phase__end_date__gt=now).distinct()\n\n @property\n def phases(self):\n from adhocracy4.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project__containers__id=self.id)\n", "path": "meinberlin/apps/projectcontainers/models.py"}]} | 537 | 122 |