problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.1k-10.2k | golden_diff stringlengths 151-4.94k | verification_info stringlengths 582-21k | num_tokens int64 271-2.05k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_14814 | rasdani/github-patches | git_diff | bridgecrewio__checkov-599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update urllib3: HTTP Header Injection vuln
**Describe the bug**
urllib3 needs to be updated to at least 1.25.9 to fix a high severity HTTP Header Injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "alabaster==0.7.12",
28 "attrs==19.3.0",
29 "babel==2.7.0",
30 "certifi==2019.11.28",
31 "chardet==3.0.4",
32 "coverage==4.5.4",
33 "coverage-badge==1.0.1",
34 "docopt==0.6.2",
35 "docutils==0.15.2",
36 "idna==2.8",
37 "imagesize==1.1.0",
38 "importlib-metadata==1.1.0; python_version < '3.8'",
39 "jinja2==2.10.3",
40 "lark-parser==0.7.8",
41 "markupsafe==1.1.1",
42 "more-itertools==8.0.0",
43 "packaging==19.2",
44 "pluggy==0.13.1",
45 "py==1.8.0",
46 "pygments==2.5.2",
47 "pyparsing==2.4.5",
48 "pytest==5.3.1",
49 "bc-python-hcl2>=0.3.10",
50 "pytz==2019.3",
51 "pyyaml==5.3.1",
52 "requests==2.22.0",
53 "six==1.15.0",
54 "snowballstemmer==2.0.0",
55 "sphinx==2.2.1",
56 "sphinxcontrib-applehelp==1.0.1",
57 "sphinxcontrib-devhelp==1.0.1",
58 "sphinxcontrib-htmlhelp==1.0.2",
59 "sphinxcontrib-jsmath==1.0.1",
60 "sphinxcontrib-qthelp==1.0.2",
61 "sphinxcontrib-serializinghtml==1.1.3",
62 "urllib3==1.25.7",
63 "wcwidth==0.1.7",
64 "zipp==0.6.0",
65 "GitPython==3.1.7",
66 "gitdb==4.0.5"
67 ]
68 },
69 install_requires=[
70 "boto3==1.12.43",
71 "chardet==3.0.4",
72 "colorama==0.4.3",
73 "docopt==0.6.2",
74 "idna==2.8",
75 "jmespath==0.10.0",
76 "junit-xml==1.8",
77 "lark-parser==0.7.8",
78 "bc-python-hcl2>=0.3.11",
79 "pyyaml==5.3.1",
80 "requests==2.22.0",
81 "six==1.15.0",
82 "tabulate==0.8.6",
83 "termcolor==1.1.0",
84 "urllib3==1.25.7",
85 "dpath==1.5.0",
86 "GitPython==3.1.7",
87 "gitdb==4.0.5"
88 ],
89 license="Apache License 2.0",
90 name="checkov",
91 version=version,
92 description="Infrastructure as code static analysis",
93 author="bridgecrew",
94 author_email="[email protected]",
95 url="https://github.com/bridgecrewio/checkov",
96 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
97 scripts=["bin/checkov","bin/checkov.cmd"],
98 long_description=long_description,
99 long_description_content_type="text/markdown",
100 classifiers=[
101 'Environment :: Console',
102 'Intended Audience :: Developers',
103 'Intended Audience :: System Administrators',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Security',
106 'Topic :: Software Development :: Build Tools'
107 ]
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
"sphinxcontrib-jsmath==1.0.1",
"sphinxcontrib-qthelp==1.0.2",
"sphinxcontrib-serializinghtml==1.1.3",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"wcwidth==0.1.7",
"zipp==0.6.0",
"GitPython==3.1.7",
@@ -81,7 +81,7 @@
"six==1.15.0",
"tabulate==0.8.6",
"termcolor==1.1.0",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"dpath==1.5.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n@@ -81,7 +81,7 @@\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n", "issue": "Update urllib3: HTTP Header Injection vuln\n**Describe the bug**\r\nurllib3 needs to be updated to at least 1.25.9 to fix a high severity HTTP Header Injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"bc-python-hcl2>=0.3.10\",\n \"pytz==2019.3\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.7\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ]\n },\n install_requires=[\n \"boto3==1.12.43\",\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"jmespath==0.10.0\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"bc-python-hcl2>=0.3.11\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.7\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\",\"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 
'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"bc-python-hcl2>=0.3.10\",\n \"pytz==2019.3\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.10\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ]\n },\n install_requires=[\n \"boto3==1.12.43\",\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"jmespath==0.10.0\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"bc-python-hcl2>=0.3.11\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.10\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\",\"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]} | 1,588 | 246 |
gh_patches_debug_16857 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4882 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
c7n_mailer, AWS not installing Lambda, no logs, no errors
I have tried to setup/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker Images (in a Jenkins pipeline) to no avail. The kicker is I am not getting any error, or output. Is there anything I can look at to see if I have an issue from my end our something on the AWS account. This is the command I am running:
```
c7n-mailer --config mailer.yml --update-lambda
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_mailer/c7n_mailer/deploy.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import copy
17 import json
18 import os
19
20 from c7n.mu import (
21 CloudWatchEventSource,
22 LambdaFunction,
23 LambdaManager,
24 PythonPackageArchive)
25
26
27 entry_source = """\
28 import logging
29
30 from c7n_mailer import handle
31
32 logger = logging.getLogger('custodian.mailer')
33 log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
34 logging.basicConfig(level=logging.INFO, format=log_format)
35 logging.getLogger('botocore').setLevel(logging.WARNING)
36
37 def dispatch(event, context):
38 return handle.start_c7n_mailer(logger)
39 """
40
41
42 def get_archive(config):
43 archive = PythonPackageArchive(modules=[
44 'c7n_mailer',
45 # core deps
46 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',
47 # for other dependencies
48 'pkg_resources',
49 # transport datadog - recursive deps
50 'datadog', 'simplejson', 'decorator',
51 # requests (recursive deps), needed by datadog, slackclient, splunk
52 'requests', 'urllib3', 'idna', 'chardet', 'certifi',
53 # used by splunk; also dependencies of c7n itself
54 'jsonpointer', 'jsonpatch'])
55
56 for d in set(config['templates_folders']):
57 if not os.path.exists(d):
58 continue
59 for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:
60 with open(os.path.join(d, t)) as fh:
61 archive.add_contents('msg-templates/%s' % t, fh.read())
62
63 function_config = copy.deepcopy(config)
64 function_config['templates_folders'] = ['msg-templates/']
65 archive.add_contents('config.json', json.dumps(function_config))
66 archive.add_contents('periodic.py', entry_source)
67
68 archive.close()
69 return archive
70
71
72 def provision(config, session_factory):
73 func_config = dict(
74 name=config.get('lambda_name', 'cloud-custodian-mailer'),
75 description=config.get('lambda_description', 'Cloud Custodian Mailer'),
76 tags=config.get('lambda_tags', {}),
77 handler='periodic.dispatch',
78 runtime=config['runtime'],
79 memory_size=config['memory'],
80 timeout=config['timeout'],
81 role=config['role'],
82 subnets=config['subnets'],
83 security_groups=config['security_groups'],
84 dead_letter_config=config.get('dead_letter_config', {}),
85 events=[
86 CloudWatchEventSource(
87 {'type': 'periodic',
88 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},
89 session_factory)
90 ])
91
92 archive = get_archive(config)
93 func = LambdaFunction(func_config, archive)
94 manager = LambdaManager(session_factory)
95 manager.publish(func)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py
--- a/tools/c7n_mailer/c7n_mailer/deploy.py
+++ b/tools/c7n_mailer/c7n_mailer/deploy.py
@@ -14,6 +14,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
+import logging
import json
import os
@@ -24,6 +25,8 @@
PythonPackageArchive)
+log = logging.getLogger('custodian-mailer')
+
entry_source = """\
import logging
@@ -91,5 +94,6 @@
archive = get_archive(config)
func = LambdaFunction(func_config, archive)
+ log.info("Provisioning mailer lambda %s" % (session_factory().region_name))
manager = LambdaManager(session_factory)
manager.publish(func)
| {"golden_diff": "diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py\n--- a/tools/c7n_mailer/c7n_mailer/deploy.py\n+++ b/tools/c7n_mailer/c7n_mailer/deploy.py\n@@ -14,6 +14,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import copy\n+import logging\n import json\n import os\n \n@@ -24,6 +25,8 @@\n PythonPackageArchive)\n \n \n+log = logging.getLogger('custodian-mailer')\n+\n entry_source = \"\"\"\\\n import logging\n \n@@ -91,5 +94,6 @@\n \n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n+ log.info(\"Provisioning mailer lambda %s\" % (session_factory().region_name))\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "issue": "c7n_mailer, AWS not installing Lambda, no logs, no errors\nI have tried to setup/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker Images (in a Jenkins pipeline) to no avail. The kicker is I am not getting any error, or output. Is there anything I can look at to see if I have an issue from my end our something on the AWS account. This is the command I am running:\r\n```\r\nc7n-mailer --config mailer.yml --update-lambda\r\n```\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport os\n\nfrom c7n.mu import (\n CloudWatchEventSource,\n LambdaFunction,\n LambdaManager,\n PythonPackageArchive)\n\n\nentry_source = \"\"\"\\\nimport logging\n\nfrom c7n_mailer import handle\n\nlogger = logging.getLogger('custodian.mailer')\nlog_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef dispatch(event, context):\n return handle.start_c7n_mailer(logger)\n\"\"\"\n\n\ndef get_archive(config):\n archive = PythonPackageArchive(modules=[\n 'c7n_mailer',\n # core deps\n 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',\n # for other dependencies\n 'pkg_resources',\n # transport datadog - recursive deps\n 'datadog', 'simplejson', 'decorator',\n # requests (recursive deps), needed by datadog, slackclient, splunk\n 'requests', 'urllib3', 'idna', 'chardet', 'certifi',\n # used by splunk; also dependencies of c7n itself\n 'jsonpointer', 'jsonpatch'])\n\n for d in set(config['templates_folders']):\n if not os.path.exists(d):\n continue\n for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:\n with open(os.path.join(d, t)) as fh:\n archive.add_contents('msg-templates/%s' % t, fh.read())\n\n function_config = copy.deepcopy(config)\n function_config['templates_folders'] = ['msg-templates/']\n archive.add_contents('config.json', json.dumps(function_config))\n archive.add_contents('periodic.py', entry_source)\n\n archive.close()\n return 
archive\n\n\ndef provision(config, session_factory):\n func_config = dict(\n name=config.get('lambda_name', 'cloud-custodian-mailer'),\n description=config.get('lambda_description', 'Cloud Custodian Mailer'),\n tags=config.get('lambda_tags', {}),\n handler='periodic.dispatch',\n runtime=config['runtime'],\n memory_size=config['memory'],\n timeout=config['timeout'],\n role=config['role'],\n subnets=config['subnets'],\n security_groups=config['security_groups'],\n dead_letter_config=config.get('dead_letter_config', {}),\n events=[\n CloudWatchEventSource(\n {'type': 'periodic',\n 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},\n session_factory)\n ])\n\n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "path": "tools/c7n_mailer/c7n_mailer/deploy.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport logging\nimport json\nimport os\n\nfrom c7n.mu import (\n CloudWatchEventSource,\n LambdaFunction,\n LambdaManager,\n PythonPackageArchive)\n\n\nlog = logging.getLogger('custodian-mailer')\n\nentry_source = \"\"\"\\\nimport logging\n\nfrom c7n_mailer import handle\n\nlogger = logging.getLogger('custodian.mailer')\nlog_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef dispatch(event, context):\n return handle.start_c7n_mailer(logger)\n\"\"\"\n\n\ndef get_archive(config):\n archive = PythonPackageArchive(modules=[\n 'c7n_mailer',\n # core deps\n 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',\n # for other dependencies\n 'pkg_resources',\n # transport datadog - recursive deps\n 'datadog', 'simplejson', 'decorator',\n # requests (recursive deps), needed by datadog, slackclient, splunk\n 'requests', 'urllib3', 'idna', 'chardet', 'certifi',\n # used by splunk; also dependencies of c7n itself\n 'jsonpointer', 'jsonpatch'])\n\n for d in set(config['templates_folders']):\n if not os.path.exists(d):\n continue\n for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:\n with open(os.path.join(d, t)) as fh:\n archive.add_contents('msg-templates/%s' % t, fh.read())\n\n function_config = copy.deepcopy(config)\n function_config['templates_folders'] = ['msg-templates/']\n archive.add_contents('config.json', json.dumps(function_config))\n archive.add_contents('periodic.py', entry_source)\n\n archive.close()\n return archive\n\n\ndef provision(config, session_factory):\n func_config = dict(\n name=config.get('lambda_name', 'cloud-custodian-mailer'),\n description=config.get('lambda_description', 'Cloud Custodian Mailer'),\n tags=config.get('lambda_tags', {}),\n handler='periodic.dispatch',\n runtime=config['runtime'],\n memory_size=config['memory'],\n 
timeout=config['timeout'],\n role=config['role'],\n subnets=config['subnets'],\n security_groups=config['security_groups'],\n dead_letter_config=config.get('dead_letter_config', {}),\n events=[\n CloudWatchEventSource(\n {'type': 'periodic',\n 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},\n session_factory)\n ])\n\n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n log.info(\"Provisioning mailer lambda %s\" % (session_factory().region_name))\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "path": "tools/c7n_mailer/c7n_mailer/deploy.py"}]} | 1,337 | 212 |
gh_patches_debug_17873 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
InputGitTreeElement should allow passing "null" for sha
Github's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.
However, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).
The current design is to only pass the `sha` if it is actually set:
https://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81
I can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.
Example
```python
new_tree = repo.create_git_tree(
[
InputGitTreeElement(
path="my/dir/my_file.txt", mode="100644", type="blob", sha=github.GithubObject.Null
),
],
base_tree=head_commit.tree
)
```
This will delete `my/dir/my_file.txt`
---
My current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).
Please let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/InputGitTreeElement.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Vincent Jacques <[email protected]> #
9 # Copyright 2016 Peter Buckley <[email protected]> #
10 # Copyright 2018 Wan Liuyang <[email protected]> #
11 # Copyright 2018 sfdye <[email protected]> #
12 # #
13 # This file is part of PyGithub. #
14 # http://pygithub.readthedocs.io/ #
15 # #
16 # PyGithub is free software: you can redistribute it and/or modify it under #
17 # the terms of the GNU Lesser General Public License as published by the Free #
18 # Software Foundation, either version 3 of the License, or (at your option) #
19 # any later version. #
20 # #
21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
24 # details. #
25 # #
26 # You should have received a copy of the GNU Lesser General Public License #
27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
28 # #
29 ################################################################################
30
31 from __future__ import absolute_import
32
33 import six
34
35 import github.GithubObject
36
37
38 class InputGitTreeElement(object):
39 """
40 This class represents InputGitTreeElements
41 """
42
43 def __init__(
44 self,
45 path,
46 mode,
47 type,
48 content=github.GithubObject.NotSet,
49 sha=github.GithubObject.NotSet,
50 ):
51 """
52 :param path: string
53 :param mode: string
54 :param type: string
55 :param content: string
56 :param sha: string
57 """
58
59 assert isinstance(path, (str, six.text_type)), path
60 assert isinstance(mode, (str, six.text_type)), mode
61 assert isinstance(type, (str, six.text_type)), type
62 assert content is github.GithubObject.NotSet or isinstance(
63 content, (str, six.text_type)
64 ), content
65 assert sha is github.GithubObject.NotSet or isinstance(
66 sha, (str, six.text_type)
67 ), sha
68 self.__path = path
69 self.__mode = mode
70 self.__type = type
71 self.__content = content
72 self.__sha = sha
73
74 @property
75 def _identity(self):
76 identity = {
77 "path": self.__path,
78 "mode": self.__mode,
79 "type": self.__type,
80 }
81 if self.__sha is not github.GithubObject.NotSet:
82 identity["sha"] = self.__sha
83 if self.__content is not github.GithubObject.NotSet:
84 identity["content"] = self.__content
85 return identity
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py
--- a/github/InputGitTreeElement.py
+++ b/github/InputGitTreeElement.py
@@ -53,7 +53,7 @@
:param mode: string
:param type: string
:param content: string
- :param sha: string
+ :param sha: string or None
"""
assert isinstance(path, (str, six.text_type)), path
@@ -62,8 +62,10 @@
assert content is github.GithubObject.NotSet or isinstance(
content, (str, six.text_type)
), content
- assert sha is github.GithubObject.NotSet or isinstance(
- sha, (str, six.text_type)
+ assert (
+ sha is github.GithubObject.NotSet
+ or sha is None
+ or isinstance(sha, (str, six.text_type))
), sha
self.__path = path
self.__mode = mode
| {"golden_diff": "diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py\n--- a/github/InputGitTreeElement.py\n+++ b/github/InputGitTreeElement.py\n@@ -53,7 +53,7 @@\n :param mode: string\n :param type: string\n :param content: string\n- :param sha: string\n+ :param sha: string or None\n \"\"\"\n \n assert isinstance(path, (str, six.text_type)), path\n@@ -62,8 +62,10 @@\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n- assert sha is github.GithubObject.NotSet or isinstance(\n- sha, (str, six.text_type)\n+ assert (\n+ sha is github.GithubObject.NotSet\n+ or sha is None\n+ or isinstance(sha, (str, six.text_type))\n ), sha\n self.__path = path\n self.__mode = mode\n", "issue": "InputGitTreeElement should allow passing \"null\" for sha\nGithub's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.\r\n\r\nHowever, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).\r\n\r\nThe current design is to only pass the `sha` if it is actually set:\r\nhttps://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81\r\n\r\nI can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.\r\n\r\nExample\r\n```python\r\nnew_tree = repo.create_git_tree(\r\n [\r\n InputGitTreeElement(\r\n path=\"my/dir/my_file.txt\", mode=\"100644\", type=\"blob\", sha=github.GithubObject.Null\r\n ),\r\n ],\r\n base_tree=head_commit.tree\r\n)\r\n```\r\nThis will delete `my/dir/my_file.txt`\r\n\r\n---\r\n\r\nMy current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).\r\n\r\nPlease let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. 
If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom __future__ import absolute_import\n\nimport six\n\nimport github.GithubObject\n\n\nclass InputGitTreeElement(object):\n \"\"\"\n This class represents InputGitTreeElements\n \"\"\"\n\n def __init__(\n self,\n path,\n mode,\n type,\n content=github.GithubObject.NotSet,\n sha=github.GithubObject.NotSet,\n ):\n \"\"\"\n :param path: string\n :param mode: string\n :param type: string\n :param content: string\n :param sha: string\n \"\"\"\n\n assert isinstance(path, (str, six.text_type)), path\n assert isinstance(mode, (str, six.text_type)), mode\n assert isinstance(type, (str, six.text_type)), type\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n assert sha is github.GithubObject.NotSet or isinstance(\n sha, (str, six.text_type)\n ), sha\n self.__path = path\n self.__mode = mode\n self.__type = type\n self.__content = content\n self.__sha = sha\n\n @property\n def _identity(self):\n identity = {\n \"path\": self.__path,\n \"mode\": self.__mode,\n \"type\": self.__type,\n }\n if self.__sha is not github.GithubObject.NotSet:\n identity[\"sha\"] = self.__sha\n if self.__content is not github.GithubObject.NotSet:\n identity[\"content\"] = self.__content\n return identity\n", "path": "github/InputGitTreeElement.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nfrom __future__ import absolute_import\n\nimport six\n\nimport github.GithubObject\n\n\nclass InputGitTreeElement(object):\n \"\"\"\n This class represents InputGitTreeElements\n \"\"\"\n\n def __init__(\n self,\n path,\n mode,\n type,\n content=github.GithubObject.NotSet,\n sha=github.GithubObject.NotSet,\n ):\n \"\"\"\n :param path: string\n :param mode: string\n :param type: string\n :param content: string\n :param sha: string or None\n \"\"\"\n\n assert isinstance(path, (str, six.text_type)), path\n assert isinstance(mode, (str, six.text_type)), mode\n assert isinstance(type, (str, six.text_type)), type\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n assert (\n sha is github.GithubObject.NotSet\n or sha is None\n or isinstance(sha, (str, six.text_type))\n ), sha\n self.__path = path\n self.__mode = mode\n self.__type = type\n self.__content = content\n self.__sha = sha\n\n @property\n def _identity(self):\n identity = {\n \"path\": self.__path,\n \"mode\": self.__mode,\n \"type\": self.__type,\n }\n if self.__sha is not github.GithubObject.NotSet:\n identity[\"sha\"] = self.__sha\n if self.__content is not github.GithubObject.NotSet:\n identity[\"content\"] = self.__content\n return identity\n", "path": "github/InputGitTreeElement.py"}]} | 1,505 | 223 |
gh_patches_debug_24648 | rasdani/github-patches | git_diff | pypa__pip-8079 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New resolver cannot installs distributions that only have pre releases
**Environment**
* pip version: master, today
* Python version: 3
* OS: linux
**Description**
I want to install a distribution that only has pre-releases. The legacy resolver does support this. The new one does not.
Note: using `--pre` does not seem to influence the result. The legacy resolver could install such distributions without using `--pre`.
**Expected behavior**
Installation should succeed.
**How to Reproduce**
```console
$ pip install --no-deps odoo13-addon-date-range --unstable-feature=resolver
ERROR: Exception:
Traceback (most recent call last):
File "/home/me/pip/src/pip/_internal/cli/base_command.py", line 199, in _main
status = self.run(options, args)
File "/home/me/pip/src/pip/_internal/cli/req_command.py", line 185, in wrapper
return func(self, options, args)
File "/home/me/pip/src/pip/_internal/commands/install.py", line 333, in run
reqs, check_supported_wheels=not options.target_dir
File "/home/me/pip/src/pip/_internal/resolution/resolvelib/resolver.py", line 80, in resolve
self._result = resolver.resolve(requirements)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 413, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 310, in resolve
failure_causes = self._attempt_to_pin_criterion(name, criterion)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 240, in _attempt_to_pin_criterion
raise InconsistentCandidate(candidate, criterion)
pip._vendor.resolvelib.resolvers.InconsistentCandidate: Provided candidate LinkCandidate('https://files.pythonhosted.org/packages/1f/0b/945335a37082b6b013cc1331f49e3f5b6a18cdd0b693475e6ca9e9a7df6e/odoo13_addon_date_range-13.0.1.0.1.dev8-py3-none-any.whl#sha256=3883bbe87db8d5db4364e8a42e86546e19e8e4f123d98c4e9454587dfa9401df (from https://pypi.org/simple/odoo13-addon-date-range/) (requires-python:>=3.5)') does not satisfy SpecifierRequirement('odoo13-addon-date-range')
```
Note I used `--no-deps` because a dependency is not on pypi, but that has no influence on the result.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/resolution/resolvelib/requirements.py`
Content:
```
1 from pip._vendor.packaging.utils import canonicalize_name
2
3 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
4
5 from .base import Requirement, format_name
6
7 if MYPY_CHECK_RUNNING:
8 from typing import Sequence
9
10 from pip._vendor.packaging.specifiers import SpecifierSet
11
12 from pip._internal.req.req_install import InstallRequirement
13
14 from .base import Candidate
15 from .factory import Factory
16
17
18 class ExplicitRequirement(Requirement):
19 def __init__(self, candidate):
20 # type: (Candidate) -> None
21 self.candidate = candidate
22
23 def __repr__(self):
24 # type: () -> str
25 return "{class_name}({candidate!r})".format(
26 class_name=self.__class__.__name__,
27 candidate=self.candidate,
28 )
29
30 @property
31 def name(self):
32 # type: () -> str
33 # No need to canonicalise - the candidate did this
34 return self.candidate.name
35
36 def find_matches(self):
37 # type: () -> Sequence[Candidate]
38 return [self.candidate]
39
40 def is_satisfied_by(self, candidate):
41 # type: (Candidate) -> bool
42 return candidate == self.candidate
43
44
45 class SpecifierRequirement(Requirement):
46 def __init__(self, ireq, factory):
47 # type: (InstallRequirement, Factory) -> None
48 assert ireq.link is None, "This is a link, not a specifier"
49 self._ireq = ireq
50 self._factory = factory
51 self.extras = ireq.req.extras
52
53 def __str__(self):
54 # type: () -> str
55 return str(self._ireq.req)
56
57 def __repr__(self):
58 # type: () -> str
59 return "{class_name}({requirement!r})".format(
60 class_name=self.__class__.__name__,
61 requirement=str(self._ireq.req),
62 )
63
64 @property
65 def name(self):
66 # type: () -> str
67 canonical_name = canonicalize_name(self._ireq.req.name)
68 return format_name(canonical_name, self.extras)
69
70 def find_matches(self):
71 # type: () -> Sequence[Candidate]
72 it = self._factory.iter_found_candidates(self._ireq, self.extras)
73 return list(it)
74
75 def is_satisfied_by(self, candidate):
76 # type: (Candidate) -> bool
77 assert candidate.name == self.name, \
78 "Internal issue: Candidate is not for this requirement " \
79 " {} vs {}".format(candidate.name, self.name)
80 return candidate.version in self._ireq.req.specifier
81
82
83 class RequiresPythonRequirement(Requirement):
84 """A requirement representing Requires-Python metadata.
85 """
86 def __init__(self, specifier, match):
87 # type: (SpecifierSet, Candidate) -> None
88 self.specifier = specifier
89 self._candidate = match
90
91 def __repr__(self):
92 # type: () -> str
93 return "{class_name}({specifier!r})".format(
94 class_name=self.__class__.__name__,
95 specifier=str(self.specifier),
96 )
97
98 @property
99 def name(self):
100 # type: () -> str
101 return self._candidate.name
102
103 def find_matches(self):
104 # type: () -> Sequence[Candidate]
105 if self._candidate.version in self.specifier:
106 return [self._candidate]
107 return []
108
109 def is_satisfied_by(self, candidate):
110 # type: (Candidate) -> bool
111 assert candidate.name == self._candidate.name, "Not Python candidate"
112 return candidate.version in self.specifier
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py
--- a/src/pip/_internal/resolution/resolvelib/requirements.py
+++ b/src/pip/_internal/resolution/resolvelib/requirements.py
@@ -77,7 +77,11 @@
assert candidate.name == self.name, \
"Internal issue: Candidate is not for this requirement " \
" {} vs {}".format(candidate.name, self.name)
- return candidate.version in self._ireq.req.specifier
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ spec = self._ireq.req.specifier
+ return spec.contains(candidate.version, prereleases=True)
class RequiresPythonRequirement(Requirement):
@@ -109,4 +113,7 @@
def is_satisfied_by(self, candidate):
# type: (Candidate) -> bool
assert candidate.name == self._candidate.name, "Not Python candidate"
- return candidate.version in self.specifier
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ return self.specifier.contains(candidate.version, prereleases=True)
| {"golden_diff": "diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py\n--- a/src/pip/_internal/resolution/resolvelib/requirements.py\n+++ b/src/pip/_internal/resolution/resolvelib/requirements.py\n@@ -77,7 +77,11 @@\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n- return candidate.version in self._ireq.req.specifier\n+ # We can safely always allow prereleases here since PackageFinder\n+ # already implements the prerelease logic, and would have filtered out\n+ # prerelease candidates if the user does not expect them.\n+ spec = self._ireq.req.specifier\n+ return spec.contains(candidate.version, prereleases=True)\n \n \n class RequiresPythonRequirement(Requirement):\n@@ -109,4 +113,7 @@\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n- return candidate.version in self.specifier\n+ # We can safely always allow prereleases here since PackageFinder\n+ # already implements the prerelease logic, and would have filtered out\n+ # prerelease candidates if the user does not expect them.\n+ return self.specifier.contains(candidate.version, prereleases=True)\n", "issue": "New resolver cannot installs distributions that only have pre releases\n**Environment**\r\n\r\n* pip version: master, today\r\n* Python version: 3\r\n* OS: linux\r\n\r\n**Description**\r\n\r\nI want to install a distribution that only has pre-releases. The legacy resolver does support this. The new one does not. \r\n\r\nNote: using `--pre` does not seem to influence the result. The legacy resolver could install such distributions without using `--pre`.\r\n\r\n**Expected behavior**\r\n\r\nInstallation should succeed.\r\n\r\n**How to Reproduce**\r\n\r\n```console\r\n$ pip install --no-deps odoo13-addon-date-range --unstable-feature=resolver\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/me/pip/src/pip/_internal/cli/base_command.py\", line 199, in _main\r\n status = self.run(options, args)\r\n File \"/home/me/pip/src/pip/_internal/cli/req_command.py\", line 185, in wrapper\r\n return func(self, options, args)\r\n File \"/home/me/pip/src/pip/_internal/commands/install.py\", line 333, in run\r\n reqs, check_supported_wheels=not options.target_dir\r\n File \"/home/me/pip/src/pip/_internal/resolution/resolvelib/resolver.py\", line 80, in resolve\r\n self._result = resolver.resolve(requirements)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 413, in resolve\r\n state = resolution.resolve(requirements, max_rounds=max_rounds)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 310, in resolve\r\n failure_causes = self._attempt_to_pin_criterion(name, criterion)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 240, in _attempt_to_pin_criterion\r\n raise InconsistentCandidate(candidate, criterion)\r\npip._vendor.resolvelib.resolvers.InconsistentCandidate: Provided candidate LinkCandidate('https://files.pythonhosted.org/packages/1f/0b/945335a37082b6b013cc1331f49e3f5b6a18cdd0b693475e6ca9e9a7df6e/odoo13_addon_date_range-13.0.1.0.1.dev8-py3-none-any.whl#sha256=3883bbe87db8d5db4364e8a42e86546e19e8e4f123d98c4e9454587dfa9401df (from https://pypi.org/simple/odoo13-addon-date-range/) (requires-python:>=3.5)') does not satisfy 
SpecifierRequirement('odoo13-addon-date-range')\r\n```\r\n\r\nNote I used `--no-deps` because a dependency is not on pypi, but that has no influence on the result.\n", "before_files": [{"content": "from pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nfrom .base import Requirement, format_name\n\nif MYPY_CHECK_RUNNING:\n from typing import Sequence\n\n from pip._vendor.packaging.specifiers import SpecifierSet\n\n from pip._internal.req.req_install import InstallRequirement\n\n from .base import Candidate\n from .factory import Factory\n\n\nclass ExplicitRequirement(Requirement):\n def __init__(self, candidate):\n # type: (Candidate) -> None\n self.candidate = candidate\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({candidate!r})\".format(\n class_name=self.__class__.__name__,\n candidate=self.candidate,\n )\n\n @property\n def name(self):\n # type: () -> str\n # No need to canonicalise - the candidate did this\n return self.candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n return [self.candidate]\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n return candidate == self.candidate\n\n\nclass SpecifierRequirement(Requirement):\n def __init__(self, ireq, factory):\n # type: (InstallRequirement, Factory) -> None\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = ireq\n self._factory = factory\n self.extras = ireq.req.extras\n\n def __str__(self):\n # type: () -> str\n return str(self._ireq.req)\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({requirement!r})\".format(\n class_name=self.__class__.__name__,\n requirement=str(self._ireq.req),\n )\n\n @property\n def name(self):\n # type: () -> str\n canonical_name = canonicalize_name(self._ireq.req.name)\n return format_name(canonical_name, self.extras)\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n it = self._factory.iter_found_candidates(self._ireq, self.extras)\n return list(it)\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n return candidate.version in self._ireq.req.specifier\n\n\nclass RequiresPythonRequirement(Requirement):\n \"\"\"A requirement representing Requires-Python metadata.\n \"\"\"\n def __init__(self, specifier, match):\n # type: (SpecifierSet, Candidate) -> None\n self.specifier = specifier\n self._candidate = match\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({specifier!r})\".format(\n class_name=self.__class__.__name__,\n specifier=str(self.specifier),\n )\n\n @property\n def name(self):\n # type: () -> str\n return self._candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n if self._candidate.version in self.specifier:\n return [self._candidate]\n return []\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n return candidate.version in self.specifier\n", "path": "src/pip/_internal/resolution/resolvelib/requirements.py"}], "after_files": [{"content": "from pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nfrom .base import Requirement, format_name\n\nif MYPY_CHECK_RUNNING:\n from typing import Sequence\n\n from 
pip._vendor.packaging.specifiers import SpecifierSet\n\n from pip._internal.req.req_install import InstallRequirement\n\n from .base import Candidate\n from .factory import Factory\n\n\nclass ExplicitRequirement(Requirement):\n def __init__(self, candidate):\n # type: (Candidate) -> None\n self.candidate = candidate\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({candidate!r})\".format(\n class_name=self.__class__.__name__,\n candidate=self.candidate,\n )\n\n @property\n def name(self):\n # type: () -> str\n # No need to canonicalise - the candidate did this\n return self.candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n return [self.candidate]\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n return candidate == self.candidate\n\n\nclass SpecifierRequirement(Requirement):\n def __init__(self, ireq, factory):\n # type: (InstallRequirement, Factory) -> None\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = ireq\n self._factory = factory\n self.extras = ireq.req.extras\n\n def __str__(self):\n # type: () -> str\n return str(self._ireq.req)\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({requirement!r})\".format(\n class_name=self.__class__.__name__,\n requirement=str(self._ireq.req),\n )\n\n @property\n def name(self):\n # type: () -> str\n canonical_name = canonicalize_name(self._ireq.req.name)\n return format_name(canonical_name, self.extras)\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n it = self._factory.iter_found_candidates(self._ireq, self.extras)\n return list(it)\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n spec = self._ireq.req.specifier\n return spec.contains(candidate.version, prereleases=True)\n\n\nclass RequiresPythonRequirement(Requirement):\n \"\"\"A requirement representing Requires-Python metadata.\n \"\"\"\n def __init__(self, specifier, match):\n # type: (SpecifierSet, Candidate) -> None\n self.specifier = specifier\n self._candidate = match\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({specifier!r})\".format(\n class_name=self.__class__.__name__,\n specifier=str(self.specifier),\n )\n\n @property\n def name(self):\n # type: () -> str\n return self._candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n if self._candidate.version in self.specifier:\n return [self._candidate]\n return []\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n return self.specifier.contains(candidate.version, prereleases=True)\n", "path": "src/pip/_internal/resolution/resolvelib/requirements.py"}]} | 1,970 | 324 |
gh_patches_debug_18915 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Links to company websites doesn't work without http://
On a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.
There is no information to the user creating an event to add http either, so I can imagine this being a growing problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/companyprofile/dashboard/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.forms import ModelForm
3
4 from apps.companyprofile.models import Company
5 from apps.dashboard.widgets import widget_generator
6 from apps.gallery.widgets import SingleImageInput
7
8
9 class CompanyForm(ModelForm):
10
11 class Meta(object):
12 model = Company
13 fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
14 exclude = ['old_image']
15
16 # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
17 widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py
--- a/apps/companyprofile/dashboard/forms.py
+++ b/apps/companyprofile/dashboard/forms.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from django.forms import ModelForm
+from django.forms.fields import URLField
from apps.companyprofile.models import Company
from apps.dashboard.widgets import widget_generator
@@ -7,10 +8,12 @@
class CompanyForm(ModelForm):
+ site = URLField(max_length=100)
class Meta(object):
model = Company
fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
+
exclude = ['old_image']
# Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
| {"golden_diff": "diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py\n--- a/apps/companyprofile/dashboard/forms.py\n+++ b/apps/companyprofile/dashboard/forms.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n from django.forms import ModelForm\n+from django.forms.fields import URLField\n \n from apps.companyprofile.models import Company\n from apps.dashboard.widgets import widget_generator\n@@ -7,10 +8,12 @@\n \n \n class CompanyForm(ModelForm):\n+ site = URLField(max_length=100)\n \n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n+\n exclude = ['old_image']\n \n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n", "issue": "Links to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\nLinks to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\n\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.widgets import widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\n\nclass CompanyForm(ModelForm):\n\n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n exclude = ['old_image']\n\n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])\n", "path": "apps/companyprofile/dashboard/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\nfrom django.forms.fields import URLField\n\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.widgets import widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\n\nclass CompanyForm(ModelForm):\n site = URLField(max_length=100)\n\n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n\n exclude = ['old_image']\n\n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])\n", "path": "apps/companyprofile/dashboard/forms.py"}]} | 649 | 188 |
gh_patches_debug_11671 | rasdani/github-patches | git_diff | netbox-community__netbox-14461 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the `clearcache` management command
### Proposed Changes
Remove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.
### Justification
~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).
The Django cache is currently used only for discrete caching operations, including:
* Config revision tracking
* Recording the most recent release
* Caching RSS feed content (the RSSFeedWidget)
There has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.
### Impact
Any mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/core/management/commands/clearcache.py`
Content:
```
1 from django.core.cache import cache
2 from django.core.management.base import BaseCommand
3
4 from core.models import ConfigRevision
5
6
7 class Command(BaseCommand):
8 """Command to clear the entire cache."""
9 help = 'Clears the cache.'
10
11 def handle(self, *args, **kwargs):
12 # Fetch the current config revision from the cache
13 config_version = cache.get('config_version')
14 # Clear the cache
15 cache.clear()
16 self.stdout.write('Cache has been cleared.', ending="\n")
17 if config_version:
18 # Activate the current config revision
19 ConfigRevision.objects.get(id=config_version).activate()
20 self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py
deleted file mode 100644
--- a/netbox/core/management/commands/clearcache.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from django.core.cache import cache
-from django.core.management.base import BaseCommand
-
-from core.models import ConfigRevision
-
-
-class Command(BaseCommand):
- """Command to clear the entire cache."""
- help = 'Clears the cache.'
-
- def handle(self, *args, **kwargs):
- # Fetch the current config revision from the cache
- config_version = cache.get('config_version')
- # Clear the cache
- cache.clear()
- self.stdout.write('Cache has been cleared.', ending="\n")
- if config_version:
- # Activate the current config revision
- ConfigRevision.objects.get(id=config_version).activate()
- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
| {"golden_diff": "diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py\ndeleted file mode 100644\n--- a/netbox/core/management/commands/clearcache.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-from django.core.cache import cache\n-from django.core.management.base import BaseCommand\n-\n-from core.models import ConfigRevision\n-\n-\n-class Command(BaseCommand):\n- \"\"\"Command to clear the entire cache.\"\"\"\n- help = 'Clears the cache.'\n-\n- def handle(self, *args, **kwargs):\n- # Fetch the current config revision from the cache\n- config_version = cache.get('config_version')\n- # Clear the cache\n- cache.clear()\n- self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n- if config_version:\n- # Activate the current config revision\n- ConfigRevision.objects.get(id=config_version).activate()\n- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "issue": "Remove the `clearcache` management command\n### Proposed Changes\r\n\r\nRemove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.\r\n\r\n### Justification\r\n\r\n~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).\r\n\r\nThe Django cache is currently used only for discrete caching operations, including:\r\n\r\n* Config revision tracking\r\n* Recording the most recent release\r\n* Caching RSS feed content (the RSSFeedWidget)\r\n\r\nThere has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.\r\n\r\n### Impact\r\n\r\nAny mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.\n", "before_files": [{"content": "from django.core.cache import cache\nfrom django.core.management.base import BaseCommand\n\nfrom core.models import ConfigRevision\n\n\nclass Command(BaseCommand):\n \"\"\"Command to clear the entire cache.\"\"\"\n help = 'Clears the cache.'\n\n def handle(self, *args, **kwargs):\n # Fetch the current config revision from the cache\n config_version = cache.get('config_version')\n # Clear the cache\n cache.clear()\n self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n if config_version:\n # Activate the current config revision\n ConfigRevision.objects.get(id=config_version).activate()\n self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "path": "netbox/core/management/commands/clearcache.py"}], "after_files": [{"content": null, "path": "netbox/core/management/commands/clearcache.py"}]} | 674 | 231 |
gh_patches_debug_54036 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 0.1.3
## 2023-08-16
```[tasklist]
### Tasks
- [x] Cut 0.1.3 release branch, freeze code
- [x] Update version number in all places in the new branch
- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub
- [x] Test installation with the new image
- [x] Test upgrade
- [x] Smoke testing application
- [x] Stability of the newly released items
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/__init__.py`
Content:
```
1 default_app_config = 'mathesar.apps.MathesarConfig'
2
3 __version__ = "0.1.2"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/__init__.py b/mathesar/__init__.py
--- a/mathesar/__init__.py
+++ b/mathesar/__init__.py
@@ -1,3 +1,3 @@
default_app_config = 'mathesar.apps.MathesarConfig'
-__version__ = "0.1.2"
+__version__ = "0.1.3"
| {"golden_diff": "diff --git a/mathesar/__init__.py b/mathesar/__init__.py\n--- a/mathesar/__init__.py\n+++ b/mathesar/__init__.py\n@@ -1,3 +1,3 @@\n default_app_config = 'mathesar.apps.MathesarConfig'\n \n-__version__ = \"0.1.2\"\n+__version__ = \"0.1.3\"\n", "issue": "Release 0.1.3\n## 2023-08-16\r\n```[tasklist]\r\n### Tasks\r\n- [x] Cut 0.1.3 release branch, freeze code\r\n- [x] Update version number in all places in the new branch\r\n- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub\r\n- [x] Test installation with the new image\r\n- [x] Test upgrade\r\n- [x] Smoke testing application\r\n- [x] Stability of the newly released items\r\n```\r\n\n", "before_files": [{"content": "default_app_config = 'mathesar.apps.MathesarConfig'\n\n__version__ = \"0.1.2\"\n", "path": "mathesar/__init__.py"}], "after_files": [{"content": "default_app_config = 'mathesar.apps.MathesarConfig'\n\n__version__ = \"0.1.3\"\n", "path": "mathesar/__init__.py"}]} | 413 | 83 |
gh_patches_debug_10502 | rasdani/github-patches | git_diff | Kinto__kinto-158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add custom (meta) data on buckets and collections
For some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).
Currently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.
We can either:
- Allow unknown fields in collection and buckets schemas
- Add a specific root level field (along `data` and `permissions`)
- Add a specific field (called `meta` for example) in the schema that could receive anything.
The advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenient is having to put `{data: {metadata: {email: "[email protected]"}}` in the payload.
Thoughts ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/collections.py`
Content:
```
1 import colander
2 import jsonschema
3 from cliquet import resource
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import NameGenerator, object_exists_or_404
7
8
9 class JSONSchemaMapping(colander.SchemaNode):
10 def schema_type(self, **kw):
11 return colander.Mapping(unknown='preserve')
12
13 def deserialize(self, cstruct=colander.null):
14 # Start by deserializing a simple mapping.
15 validated = super(JSONSchemaMapping, self).deserialize(cstruct)
16
17 # In case it is optional in parent schema.
18 if not validated or validated in (colander.null, colander.drop):
19 return validated
20
21 try:
22 jsonschema.Draft4Validator.check_schema(validated)
23 except jsonschema_exceptions.SchemaError as e:
24 self.raise_invalid(e.path.pop() + e.message)
25 return validated
26
27
28 class CollectionSchema(resource.ResourceSchema):
29 schema = JSONSchemaMapping(missing=colander.drop)
30
31
32 @resource.register(name='collection',
33 collection_methods=('GET',),
34 collection_path='/buckets/{{bucket_id}}/collections',
35 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
36 class Collection(resource.ProtectedResource):
37 mapping = CollectionSchema()
38 permissions = ('read', 'write', 'record:create')
39
40 def __init__(self, *args, **kwargs):
41 super(Collection, self).__init__(*args, **kwargs)
42
43 bucket_id = self.request.matchdict['bucket_id']
44 object_exists_or_404(self.request,
45 collection_id='bucket',
46 object_id=bucket_id)
47
48 self.collection.id_generator = NameGenerator()
49
50 def get_parent_id(self, request):
51 bucket_id = request.matchdict['bucket_id']
52 parent_id = '/buckets/%s' % bucket_id
53 return parent_id
54
55 def delete(self):
56 result = super(Collection, self).delete()
57
58 # Delete records.
59 storage = self.collection.storage
60 parent_id = '%s/collections/%s' % (self.collection.parent_id,
61 self.record_id)
62 storage.delete_all(collection_id='record',
63 parent_id=parent_id,
64 with_deleted=False)
65 storage.purge_deleted(collection_id='record', parent_id=parent_id)
66
67 return result
68
```
Path: `kinto/views/records.py`
Content:
```
1 import jsonschema
2 from cliquet import resource, schema
3 from cliquet.errors import raise_invalid
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import object_exists_or_404
7
8
9 class RecordSchema(schema.ResourceSchema):
10 class Options():
11 preserve_unknown = True
12
13
14 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
15
16
17 @resource.register(name='record',
18 collection_path=_parent_path + '/records',
19 record_path=_parent_path + '/records/{{id}}')
20 class Record(resource.ProtectedResource):
21
22 mapping = RecordSchema()
23 schema_field = 'schema'
24
25 def __init__(self, *args, **kwargs):
26 super(Record, self).__init__(*args, **kwargs)
27
28 bucket_id = self.request.matchdict['bucket_id']
29 object_exists_or_404(self.request,
30 collection_id='bucket',
31 object_id=bucket_id)
32
33 collection_id = self.request.matchdict['collection_id']
34 parent_id = '/buckets/%s' % bucket_id
35 self._collection = object_exists_or_404(self.request,
36 collection_id='collection',
37 parent_id=parent_id,
38 object_id=collection_id)
39
40 def get_parent_id(self, request):
41 bucket_id = request.matchdict['bucket_id']
42 collection_id = request.matchdict['collection_id']
43 return '/buckets/%s/collections/%s' % (bucket_id, collection_id)
44
45 def is_known_field(self, field_name):
46 """Without schema, any field is considered as known."""
47 return True
48
49 def process_record(self, new, old=None):
50 """Validate records against collection schema, if any."""
51 schema = self._collection.get('schema')
52 if not schema:
53 return new
54
55 collection_timestamp = self._collection[self.collection.modified_field]
56
57 try:
58 jsonschema.validate(new, schema)
59 new[self.schema_field] = collection_timestamp
60 except jsonschema_exceptions.ValidationError as e:
61 field = e.path.pop() if e.path else e.validator_value.pop()
62 raise_invalid(self.request, name=field, description=e.message)
63
64 return new
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -28,6 +28,9 @@
class CollectionSchema(resource.ResourceSchema):
schema = JSONSchemaMapping(missing=colander.drop)
+ class Options:
+ preserve_unknown = True
+
@resource.register(name='collection',
collection_methods=('GET',),
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -7,7 +7,7 @@
class RecordSchema(schema.ResourceSchema):
- class Options():
+ class Options:
preserve_unknown = True
| {"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -28,6 +28,9 @@\n class CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n \n+ class Options:\n+ preserve_unknown = True\n+\n \n @resource.register(name='collection',\n collection_methods=('GET',),\ndiff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -7,7 +7,7 @@\n \n \n class RecordSchema(schema.ResourceSchema):\n- class Options():\n+ class Options:\n preserve_unknown = True\n", "issue": "Add custom (meta) data on buckets and collections \nFor some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).\n\nCurrently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.\n\nWe can either:\n- Allow unknown fields in collection and buckets schemas\n- Add a specific root level field (along `data` and `permissions`)\n- Add a specific field (called `meta` for example) in the schema that could receive anything.\n\nThe advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenient is having to put `{data: {metadata: {email: \"[email protected]\"}}` in the payload.\n\nThoughts ?\n\n", "before_files": [{"content": "import colander\nimport jsonschema\nfrom cliquet import resource\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import NameGenerator, object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super(JSONSchemaMapping, self).deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n\n try:\n jsonschema.Draft4Validator.check_schema(validated)\n except jsonschema_exceptions.SchemaError as e:\n self.raise_invalid(e.path.pop() + e.message)\n return validated\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n\n\[email protected](name='collection',\n collection_methods=('GET',),\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ProtectedResource):\n mapping = CollectionSchema()\n permissions = ('read', 'write', 'record:create')\n\n def __init__(self, *args, **kwargs):\n super(Collection, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = '/buckets/%s' % bucket_id\n return parent_id\n\n def delete(self):\n result = super(Collection, self).delete()\n\n # Delete records.\n storage = self.collection.storage\n parent_id = '%s/collections/%s' % (self.collection.parent_id,\n self.record_id)\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n 
storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/collections.py"}, {"content": "import jsonschema\nfrom cliquet import resource, schema\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(schema.ResourceSchema):\n class Options():\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ProtectedResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n collection_id = self.request.matchdict['collection_id']\n parent_id = '/buckets/%s' % bucket_id\n self._collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=parent_id,\n object_id=collection_id)\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n schema = self._collection.get('schema')\n if not schema:\n return new\n\n collection_timestamp = self._collection[self.collection.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import colander\nimport jsonschema\nfrom cliquet import resource\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import NameGenerator, object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super(JSONSchemaMapping, self).deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n\n try:\n jsonschema.Draft4Validator.check_schema(validated)\n except jsonschema_exceptions.SchemaError as e:\n self.raise_invalid(e.path.pop() + e.message)\n return validated\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n\n class Options:\n preserve_unknown = True\n\n\[email protected](name='collection',\n collection_methods=('GET',),\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ProtectedResource):\n mapping = CollectionSchema()\n permissions = ('read', 'write', 'record:create')\n\n def __init__(self, *args, **kwargs):\n super(Collection, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n 
object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = '/buckets/%s' % bucket_id\n return parent_id\n\n def delete(self):\n result = super(Collection, self).delete()\n\n # Delete records.\n storage = self.collection.storage\n parent_id = '%s/collections/%s' % (self.collection.parent_id,\n self.record_id)\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/collections.py"}, {"content": "import jsonschema\nfrom cliquet import resource, schema\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(schema.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ProtectedResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n collection_id = self.request.matchdict['collection_id']\n parent_id = '/buckets/%s' % bucket_id\n self._collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=parent_id,\n object_id=collection_id)\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n schema = self._collection.get('schema')\n if not schema:\n return new\n\n collection_timestamp = self._collection[self.collection.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n", "path": "kinto/views/records.py"}]} | 1,645 | 167 |
gh_patches_debug_5423 | rasdani/github-patches | git_diff | ivy-llc__ivy-18290 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
linear
#15051
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/nn/functional/common.py`
Content:
```
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @to_ivy_arrays_and_back
8 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
9 def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
10 if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
11 numerator = ivy.sum(x1 * x2, axis=axis)
12 x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
13 x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
14 else:
15 numerator = ivy.sum(x1 * x2)
16 x1_squared_norm = ivy.sum(ivy.square(x1))
17 x2_squared_norm = ivy.sum(ivy.square(x2))
18
19 x1_norm = ivy.sqrt(x1_squared_norm)
20 x2_norm = ivy.sqrt(x2_squared_norm)
21 norm_mm = x1_norm * x2_norm
22 denominator = ivy.maximum(norm_mm, eps)
23
24 cosine = numerator / denominator
25 return cosine
26
27
28 @to_ivy_arrays_and_back
29 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
30 def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
31 return ivy.dropout2d(x, p=p, training=training, data_format=data_format)
32
33
34 def get_mask(shape, device, prob, seed=None):
35 mask = ivy.where(
36 ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
37 0.0,
38 1.0,
39 )
40 return mask
41
42
43 @to_ivy_arrays_and_back
44 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
45 def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
46 if axis > 1:
47 raise ValueError("Axis value can only be 0 or 1 or None.")
48 elif axis is None or (isinstance(axis, list) and len(axis) == 2):
49 mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
50 elif axis == 0:
51 mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
52 mask = ivy.broadcast_to(mask, x.shape)
53 elif axis == 1:
54 mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
55 mask = ivy.broadcast_to(mask, x.shape)
56 if mode == "upscale_in_train":
57 if training:
58 out = ivy.multiply(x, mask)
59 ret = ivy.multiply(out, 1.0 / (1.0 - p))
60 else:
61 ret = x
62 else:
63 if training:
64 ret = ivy.multiply(x, mask)
65 else:
66 ret = ivy.multiply(x, (1.0 - p))
67 return ret
68
69
70 @to_ivy_arrays_and_back
71 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
72 def zeropad2d(x, padding, data_format="NCHW", name=None):
73 if ivy.is_array(padding):
74 padding = padding.to_list()
75 if isinstance(padding, int):
76 padding = [padding, padding, padding, padding]
77 if len(padding) != 4:
78 raise ValueError("Padding length should be 4.")
79 if x.ndim != 4:
80 raise ValueError("Input x must be 4-dimensional.")
81 if data_format == "NCHW":
82 padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
83 elif data_format == "NHWC":
84 padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
85 else:
86 raise ValueError("Unknown data_format: {}".format(data_format))
87 return ivy.pad(x, padding, mode="constant", constant_values=0.0)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -85,3 +85,10 @@
else:
raise ValueError("Unknown data_format: {}".format(data_format))
return ivy.pad(x, padding, mode="constant", constant_values=0.0)
+
+
+@to_ivy_arrays_and_back
+@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
+def linear(x, weight, bias=None, name=None):
+ weight = ivy.swapaxes(weight, -1, -2)
+ return ivy.linear(x, weight, bias=bias)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -85,3 +85,10 @@\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+def linear(x, weight, bias=None, name=None):\n+ weight = ivy.swapaxes(weight, -1, -2)\n+ return ivy.linear(x, weight, bias=bias)\n", "issue": "linear\n#15051 \n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], 
padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, 
\"paddle\")\ndef linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 1,429 | 201 |
gh_patches_debug_2077 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[requires.io] dependency update on master branch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import runpy
3 from codecs import open
4
5 from setuptools import setup, find_packages
6
7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
13 long_description = f.read()
14
15 VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"]
16
17 setup(
18 name="mitmproxy",
19 version=VERSION,
20 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
21 long_description=long_description,
22 url="http://mitmproxy.org",
23 author="Aldo Cortesi",
24 author_email="[email protected]",
25 license="MIT",
26 classifiers=[
27 "License :: OSI Approved :: MIT License",
28 "Development Status :: 5 - Production/Stable",
29 "Environment :: Console",
30 "Environment :: Console :: Curses",
31 "Operating System :: MacOS :: MacOS X",
32 "Operating System :: POSIX",
33 "Operating System :: Microsoft :: Windows",
34 "Programming Language :: Python",
35 "Programming Language :: Python :: 3",
36 "Programming Language :: Python :: 3 :: Only",
37 "Programming Language :: Python :: 3.5",
38 "Programming Language :: Python :: 3.6",
39 "Programming Language :: Python :: Implementation :: CPython",
40 "Topic :: Security",
41 "Topic :: Internet",
42 "Topic :: Internet :: WWW/HTTP",
43 "Topic :: Internet :: Proxy Servers",
44 "Topic :: Software Development :: Testing"
45 ],
46 packages=find_packages(include=[
47 "mitmproxy", "mitmproxy.*",
48 "pathod", "pathod.*",
49 ]),
50 include_package_data=True,
51 entry_points={
52 'console_scripts': [
53 "mitmproxy = mitmproxy.tools.main:mitmproxy",
54 "mitmdump = mitmproxy.tools.main:mitmdump",
55 "mitmweb = mitmproxy.tools.main:mitmweb",
56 "pathod = pathod.pathod_cmdline:go_pathod",
57 "pathoc = pathod.pathoc_cmdline:go_pathoc"
58 ]
59 },
60 # https://packaging.python.org/en/latest/requirements/#install-requires
61 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
62 install_requires=[
63 "blinker>=1.4, <1.5",
64 "brotlipy>=0.5.1, <0.8",
65 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
66 "click>=6.2, <7",
67 "cryptography>=2.0,<2.2",
68 "h2>=3.0, <4",
69 "hyperframe>=5.0, <6",
70 "kaitaistruct>=0.7, <0.8",
71 "ldap3>=2.2.0, <2.4",
72 "passlib>=1.6.5, <1.8",
73 "pyasn1>=0.3.1, <0.4",
74 "pyOpenSSL>=17.2,<17.4",
75 "pyparsing>=2.1.3, <2.3",
76 "pyperclip>=1.5.22, <1.6",
77 "requests>=2.9.1, <3",
78 "ruamel.yaml>=0.13.2, <0.16",
79 "sortedcontainers>=1.5.4, <1.6",
80 "tornado>=4.3, <4.6",
81 "urwid>=1.3.1, <1.4",
82 ],
83 extras_require={
84 ':sys_platform == "win32"': [
85 "pydivert>=2.0.3,<2.2",
86 ],
87 'dev': [
88 "flake8>=3.2.1, <3.5",
89 "Flask>=0.10.1, <0.13",
90 "mypy>=0.530,<0.541",
91 "pytest-cov>=2.2.1, <3",
92 "pytest-faulthandler>=1.3.0, <2",
93 "pytest-timeout>=1.0.0, <2",
94 "pytest-xdist>=1.14, <2",
95 "pytest>=3.1, <4",
96 "rstcheck>=2.2, <4.0",
97 "sphinx_rtd_theme>=0.1.9, <0.3",
98 "sphinx-autobuild>=0.5.2, <0.8",
99 "sphinx>=1.3.5, <1.7",
100 "sphinxcontrib-documentedlist>=0.5.0, <0.7",
101 "tox>=2.3, <3",
102 ],
103 'examples': [
104 "beautifulsoup4>=4.4.1, <4.7",
105 "Pillow>=4.3,<4.4",
106 ]
107 }
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,7 @@
"pydivert>=2.0.3,<2.2",
],
'dev': [
- "flake8>=3.2.1, <3.5",
+ "flake8>=3.5, <3.6",
"Flask>=0.10.1, <0.13",
"mypy>=0.530,<0.541",
"pytest-cov>=2.2.1, <3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,7 +85,7 @@\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n- \"flake8>=3.2.1, <3.5\",\n+ \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n", "issue": "[requires.io] dependency update on master branch\n\n", "before_files": [{"content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.2.1, <3.5\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n 
\"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n \"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n \"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n 
\"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py"}]} | 1,641 | 137 |
gh_patches_debug_28334 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
only caputring the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py
```python
def scan_entity_conf(self, conf):
    for mydir in conf:
        mypath = mydir["value"]
        if re.match(PATH, mypath):
            return CheckResult.FAILED, mydir
    return CheckResult.PASSED, None
```
--- END ISSUE ---
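For context before looking at the files, here is a hedged, self-contained sketch (the `conf` structure is a hypothetical stand-in for what checkov passes to `scan_entity_conf`) showing how the early `return` reports at most one offending `WORKDIR` even when several are relative:

```python
import re

ISABSOLUTE = re.compile(r"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\.*)|(^\$[{}A-z0-9-_+].*)")

# Hypothetical parsed Dockerfile containing two relative WORKDIR instructions.
conf = [
    {"instruction": "WORKDIR", "value": "build", "startline": 3, "endline": 3},
    {"instruction": "WORKDIR", "value": "output", "startline": 7, "endline": 7},
]

def scan_entity_conf(conf):
    # Mirrors the current logic: returns as soon as one relative path is found,
    # so the second offending WORKDIR above is never reported.
    for workdir in conf:
        if not re.match(ISABSOLUTE, workdir["value"]):
            return "FAILED", workdir
    return "PASSED", None

print(scan_entity_conf(conf))  # reports only the first WORKDIR, the second is silently skipped
```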
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/WorkdirIsAbsolute.py`
Content:
```
1 import re
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
5
6 ISABSOLUTE = re.compile("(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\.*)|(^\\$[{}A-z0-9-_+].*)")
7
8
9 class WorkdirIsAbsolute(BaseDockerfileCheck):
10 def __init__(self):
11 """
12 For clarity and reliability, you should always use absolute paths for your WORKDIR.
13 """
14 name = "Ensure that WORKDIR values are absolute paths"
15 id = "CKV_DOCKER_10"
16 supported_instructions = ["WORKDIR"]
17 categories = [CheckCategories.CONVENTION]
18 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
19
20 def scan_entity_conf(self, conf):
21 for mydir in conf:
22 mypath = mydir["value"]
23 if not re.match(ISABSOLUTE, mypath):
24 return CheckResult.FAILED, mydir
25 return CheckResult.PASSED, None
26
27
28 check = WorkdirIsAbsolute()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py
+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import re
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -7,21 +9,26 @@
class WorkdirIsAbsolute(BaseDockerfileCheck):
- def __init__(self):
+ def __init__(self) -> None:
"""
For clarity and reliability, you should always use absolute paths for your WORKDIR.
"""
name = "Ensure that WORKDIR values are absolute paths"
id = "CKV_DOCKER_10"
- supported_instructions = ["WORKDIR"]
- categories = [CheckCategories.CONVENTION]
+ supported_instructions = ("WORKDIR",)
+ categories = (CheckCategories.CONVENTION,)
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
- def scan_entity_conf(self, conf):
- for mydir in conf:
- mypath = mydir["value"]
- if not re.match(ISABSOLUTE, mypath):
- return CheckResult.FAILED, mydir
+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:
+ workdirs = []
+ for workdir in conf:
+ path = workdir["value"]
+ if not re.match(ISABSOLUTE, path):
+ workdirs.append(workdir)
+
+ if workdirs:
+ return CheckResult.FAILED, workdirs
+
return CheckResult.PASSED, None
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n import re\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n@@ -7,21 +9,26 @@\n \n \n class WorkdirIsAbsolute(BaseDockerfileCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n- supported_instructions = [\"WORKDIR\"]\n- categories = [CheckCategories.CONVENTION]\n+ supported_instructions = (\"WORKDIR\",)\n+ categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n- def scan_entity_conf(self, conf):\n- for mydir in conf:\n- mypath = mydir[\"value\"]\n- if not re.match(ISABSOLUTE, mypath):\n- return CheckResult.FAILED, mydir\n+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n+ workdirs = []\n+ for workdir in conf:\n+ path = workdir[\"value\"]\n+ if not re.match(ISABSOLUTE, path):\n+ workdirs.append(workdir)\n+\n+ if workdirs:\n+ return CheckResult.FAILED, workdirs\n+\n return CheckResult.PASSED, None\n", "issue": "only caputring the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py\ndef scan_entity_conf(self, conf):\r\n for mydir in conf:\r\n mypath = mydir[\"value\"]\r\n if re.match(PATH, mypath):\r\n return CheckResult.FAILED, mydir\r\n return CheckResult.PASSED, None\n", "before_files": [{"content": "import re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self):\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = [\"WORKDIR\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n for mydir in conf:\n mypath = mydir[\"value\"]\n if not re.match(ISABSOLUTE, mypath):\n return CheckResult.FAILED, mydir\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = (\"WORKDIR\",)\n categories = (CheckCategories.CONVENTION,)\n 
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n workdirs = []\n for workdir in conf:\n path = workdir[\"value\"]\n if not re.match(ISABSOLUTE, path):\n workdirs.append(workdir)\n\n if workdirs:\n return CheckResult.FAILED, workdirs\n\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}]} | 663 | 411 |
gh_patches_debug_29107 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-410 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile
*cfn-lint version: `0.8.1`*
# Description of issue
When using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.
# Steps to reproduce
Create a parent template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
IAMInstanceProfile:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml
Instance:
Type: AWS::CloudFormation::Stack
Properties:
Parameters:
IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml
```
and a child template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Roles:
- ExampleRole
Outputs:
InstanceProfileArn:
Value: !GetAtt InstanceProfile.Arn
```
# Expected results
The `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource), and as a result, there should be no error.
Ideally, cfn-lint would recognize that `GetAtt` references an output from another stack, which could very well be an InstanceProfile ARN, and as a result optimistically not report this error.
Alternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.
# Actual results
cfn-lint reports the error
> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt
> example-parent.yml:11:9
--- END ISSUE ---
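Before the files, a hedged sketch of the allowance the issue is asking for (names and structure are illustrative, not the rule's real API): when the `Fn::GetAtt` target is an `AWS::CloudFormation::Stack` and the attribute path goes through `Outputs`, the value cannot be type-checked statically and should not be flagged:

```python
# Hypothetical, simplified view of the parent template's Resources section.
resources = {
    "IAMInstanceProfile": {"Type": "AWS::CloudFormation::Stack"},
    "Instance": {"Type": "AWS::CloudFormation::Stack"},
}

def iam_instance_profile_getatt_ok(resources, getatt):
    """getatt is the list form of Fn::GetAtt, e.g. ["IAMInstanceProfile", "Outputs", "InstanceProfileArn"]."""
    target_type = resources.get(getatt[0], {}).get("Type")
    if target_type == "AWS::IAM::InstanceProfile":
        return True
    # A nested stack output cannot be resolved statically, so accept it as long
    # as the reference really goes through the stack's Outputs.
    if target_type == "AWS::CloudFormation::Stack":
        return len(getatt) > 1 and getatt[1] == "Outputs"
    return False

print(iam_instance_profile_getatt_ok(resources, ["IAMInstanceProfile", "Outputs", "InstanceProfileArn"]))  # True
```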
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/iam/InstanceProfile.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class InstanceProfile(CloudFormationLintRule):
22 """Check if IamInstanceProfile are used"""
23 id = 'E2502'
24 shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'
25 description = 'See if there are any properties IamInstanceProfile' + \
26 'are using name and not ARN'
27 source_url = 'https://github.com/awslabs/cfn-python-lint'
28 tags = ['properties']
29
30 def match(self, cfn):
31 """Check CloudFormation IamInstanceProfile Parameters"""
32
33 matches = []
34
35 # Build the list of keys
36 trees = cfn.search_deep_keys('Fn::GetAtt')
37 # Filter only resources
38 # Disable pylint for Pylint 2
39 # pylint: disable=W0110
40 trees = filter(lambda x: x[0] == 'Resources', trees)
41 for tree in trees:
42 if any(e == 'IamInstanceProfile' for e in tree):
43 obj = tree[-1]
44 objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
45 if objtype:
46 if objtype != 'AWS::IAM::InstanceProfile':
47 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
48 '/'.join(map(str, tree[:-1])))
49 matches.append(RuleMatch(tree[:-1], message))
50 else:
51 if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
52 if obj[1] != 'Arn':
53 message = 'Property IamInstanceProfile should be an ARN for %s' % (
54 '/'.join(map(str, tree[:-1])))
55 matches.append(RuleMatch(tree[:-1], message))
56 else:
57 if obj[1] == 'Arn':
58 message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
59 '/'.join(map(str, tree[:-1])))
60 matches.append(RuleMatch(tree[:-1], message))
61
62 # Search Refs
63 trees = cfn.search_deep_keys('Ref')
64 # Filter only resoureces
65 trees = filter(lambda x: x[0] == 'Resources', trees)
66 for tree in trees:
67 if any(e == 'IamInstanceProfile' for e in tree):
68 obj = tree[-1]
69 objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')
70 if objtype:
71 if objtype != 'AWS::IAM::InstanceProfile':
72 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
73 '/'.join(map(str, tree[:-1])))
74 matches.append(RuleMatch(tree[:-1], message))
75
76 return matches
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py
--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py
+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py
@@ -43,12 +43,17 @@
obj = tree[-1]
objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
if objtype:
- if objtype != 'AWS::IAM::InstanceProfile':
+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:
message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
'/'.join(map(str, tree[:-1])))
matches.append(RuleMatch(tree[:-1], message))
else:
- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
+ if objtype in ['AWS::CloudFormation::Stack']:
+ if obj[1] != 'Outputs':
+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
+ '/'.join(map(str, tree[:-1])))
+ matches.append(RuleMatch(tree[:-1], message))
+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
if obj[1] != 'Arn':
message = 'Property IamInstanceProfile should be an ARN for %s' % (
'/'.join(map(str, tree[:-1])))
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py\n+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n@@ -43,12 +43,17 @@\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n- if objtype != 'AWS::IAM::InstanceProfile':\n+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n+ if objtype in ['AWS::CloudFormation::Stack']:\n+ if obj[1] != 'Outputs':\n+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n", "issue": "Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile\n*cfn-lint version: `0.8.1`*\r\n\r\n# Description of issue\r\n\r\nWhen using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.\r\n\r\n# Steps to reproduce\r\n\r\nCreate a parent template like this\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n IAMInstanceProfile:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml\r\n Instance:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n Parameters:\r\n IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml\r\n```\r\nand a child template like this\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n InstanceProfile:\r\n Type: AWS::IAM::InstanceProfile\r\n Properties:\r\n Roles:\r\n - ExampleRole\r\nOutputs:\r\n InstanceProfileArn:\r\n Value: !GetAtt InstanceProfile.Arn\r\n```\r\n\r\n# Expected results\r\n\r\nThe `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource and as a result, there should be no error.\r\n\r\nIdeally cfn-lint would recognize that `GetAtt` is referencing an output from another stack which very well could be an InstanceProfile ARN and as a result, optimistically not report this error.\r\n\r\nAlternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.\r\n\r\n# Actual results\r\n\r\ncfn-lint reports the error\r\n\r\n> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt\r\n> example-parent.yml:11:9\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = []\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resources\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = []\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resources\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if objtype in ['AWS::CloudFormation::Stack']:\n if obj[1] != 'Outputs':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}]} | 1,736 | 384 |
gh_patches_debug_18313 | rasdani/github-patches | git_diff | docker__docker-py-575 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker.utils.decorators.check_resource problem
When using `docker_client.start(**kwargs)` to start the container, the following exception is raised:
```
c.start(**s_kwargs)
File "/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 12, in wrapped
return f(self, resource_id, *args, **kwargs)
TypeError: start() got multiple values for keyword argument 'container'
```
--- END ISSUE ---
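A stripped-down reproduction (hypothetical class and method names, independent of docker-py itself) makes the failure mode clearer: the decorator forwards `resource_id` positionally while `container` is still sitting in `kwargs`, so the wrapped method receives the same parameter twice:

```python
def check_resource(f):
    def wrapped(self, resource_id=None, *args, **kwargs):
        # resource_id stays None here, but 'container' remains inside kwargs...
        return f(self, resource_id, *args, **kwargs)
    return wrapped

class FakeClient:
    @check_resource
    def start(self, container, **kwargs):
        return container

try:
    FakeClient().start(container="abc123")
except TypeError as exc:
    print(exc)  # start() got multiple values for argument 'container'
```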
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/decorators.py`
Content:
```
1 from .. import errors
2
3
4 def check_resource(f):
5 def wrapped(self, resource_id=None, *args, **kwargs):
6 if resource_id is None and (
7 kwargs.get('container') is None and kwargs.get('image') is None
8 ):
9 raise errors.NullResource(
10 'image or container param is None'
11 )
12 return f(self, resource_id, *args, **kwargs)
13 return wrapped
14
```
Path: `docker/version.py`
Content:
```
1 version = "1.2.1-dev"
2 version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -3,11 +3,14 @@
def check_resource(f):
def wrapped(self, resource_id=None, *args, **kwargs):
- if resource_id is None and (
- kwargs.get('container') is None and kwargs.get('image') is None
- ):
- raise errors.NullResource(
- 'image or container param is None'
- )
+ if resource_id is None:
+ if kwargs.get('container'):
+ resource_id = kwargs.pop('container')
+ elif kwargs.get('image'):
+ resource_id = kwargs.pop('image')
+ else:
+ raise errors.NullResource(
+ 'image or container param is undefined'
+ )
return f(self, resource_id, *args, **kwargs)
return wrapped
diff --git a/docker/version.py b/docker/version.py
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.2.1-dev"
+version = "1.2.1"
version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
| {"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -3,11 +3,14 @@\n \n def check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n- if resource_id is None and (\n- kwargs.get('container') is None and kwargs.get('image') is None\n- ):\n- raise errors.NullResource(\n- 'image or container param is None'\n- )\n+ if resource_id is None:\n+ if kwargs.get('container'):\n+ resource_id = kwargs.pop('container')\n+ elif kwargs.get('image'):\n+ resource_id = kwargs.pop('image')\n+ else:\n+ raise errors.NullResource(\n+ 'image or container param is undefined'\n+ )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\ndiff --git a/docker/version.py b/docker/version.py\n--- a/docker/version.py\n+++ b/docker/version.py\n@@ -1,2 +1,2 @@\n-version = \"1.2.1-dev\"\n+version = \"1.2.1\"\n version_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "issue": "docker.utils.decorators.check_resource problem\nWhen use `docker_client.start(**kwargs)` to start the container, will be raise follow exception:\n\n```\nc.start(**s_kwargs)\n File \"/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 12, in wrapped\n return f(self, resource_id, *args, **kwargs)\nTypeError: start() got multiple values for keyword argument 'container'\n```\n\n", "before_files": [{"content": "from .. import errors\n\n\ndef check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None and (\n kwargs.get('container') is None and kwargs.get('image') is None\n ):\n raise errors.NullResource(\n 'image or container param is None'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n", "path": "docker/utils/decorators.py"}, {"content": "version = \"1.2.1-dev\"\nversion_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "path": "docker/version.py"}], "after_files": [{"content": "from .. import errors\n\n\ndef check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n else:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n", "path": "docker/utils/decorators.py"}, {"content": "version = \"1.2.1\"\nversion_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "path": "docker/version.py"}]} | 514 | 279 |
gh_patches_debug_25492 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create owner references among master and worker pods
Currently we have the following two ways to delete master + worker pods:
* Delete each pod one by one
* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`
It would be much easier if users could just delete the master pod and have the worker pods deleted automatically. This would be possible if there were owner references between the master and worker pods.
--- END ISSUE ---
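For reference, the Kubernetes feature the issue is pointing at is an `ownerReferences` entry on each worker pod's metadata that names the master pod as its owner, so deleting the master lets the garbage collector remove the workers. A minimal hedged sketch using the Python client (pod names and objects are placeholders):

```python
from kubernetes import client

def owner_reference_for(master_pod):
    # Any pod whose metadata lists the master as an owner is garbage-collected
    # automatically once the master pod is deleted.
    return client.V1OwnerReference(
        api_version="v1",
        kind="Pod",
        name=master_pod.metadata.name,
        uid=master_pod.metadata.uid,
        block_owner_deletion=True,
    )

# Attached when building a worker pod, e.g.:
# metadata = client.V1ObjectMeta(
#     name="elasticdl-worker-test-job-0",
#     owner_references=[owner_reference_for(master_pod)],
# )
```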
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/k8s_client.py`
Content:
```
1 import logging
2 import os
3 import threading
4 import traceback
5
6 from kubernetes import client, config, watch
7 from kubernetes.client import (
8 V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,
9 )
10
11 WORKER_POD_NAME_PREFIX = "elasticdl-worker-"
12
13
14 class Client(object):
15 def __init__(self, *, worker_image, namespace, job_name, event_callback):
16 """
17 ElasticDL k8s client.
18
19 Args:
20 worker_image: Docker image path for ElasticDL workers.
21 namespace: k8s namespace for ElasticDL pods.
22 job_name: ElasticDL job name, should be unique in the namespace.
23 Used as worker pod name prefix and value for "elasticdl" label.
24 event_callback: If not None, an event watcher will be created and
25 events passed to the callback.
26 """
27 if os.getenv("KUBERNETES_SERVICE_HOST"):
28 # We are running inside k8s
29 config.load_incluster_config()
30 else:
31 # Use user's kube config
32 config.load_kube_config()
33
34 self._v1 = client.CoreV1Api()
35 self._logger = logging.getLogger(__name__)
36 self._image = worker_image
37 self._ns = namespace
38 self._job_name = job_name
39 self._event_cb = event_callback
40 if self._event_cb:
41 threading.Thread(
42 target=self._watch, name="event_watcher", daemon=True
43 ).start()
44
45 def _watch(self):
46 stream = watch.Watch().stream(
47 self._v1.list_namespaced_pod,
48 self._ns,
49 label_selector="elasticdl_job_name=" + self._job_name,
50 )
51 for event in stream:
52 try:
53 self._event_cb(event)
54 except Exception:
55 traceback.print_exc()
56
57 def get_worker_pod_name(self, worker_id):
58 return WORKER_POD_NAME_PREFIX + self._job_name + "-" + str(worker_id)
59
60 def _create_worker_pod(
61 self,
62 worker_id,
63 resource_requests,
64 resource_limits,
65 priority,
66 mount_path,
67 volume_name,
68 image_pull_policy,
69 command,
70 args,
71 restart_policy,
72 ):
73 # Worker container config
74 container = client.V1Container(
75 name=self.get_worker_pod_name(worker_id),
76 image=self._image,
77 command=command,
78 resources=client.V1ResourceRequirements(
79 requests=resource_requests, limits=resource_limits
80 ),
81 image_pull_policy=image_pull_policy,
82 args=args,
83 )
84
85 # Pod
86 spec = client.V1PodSpec(
87 containers=[container], restart_policy=restart_policy
88 )
89
90 # Mount data path
91 if mount_path is not None and volume_name is not None:
92 volume = client.V1Volume(
93 name="data-volume",
94 persistent_volume_claim=pvcVolumeSource(
95 claim_name="fileserver-claim", read_only=False
96 ),
97 )
98 spec.volumes = [volume]
99 container.volume_mounts = [
100 client.V1VolumeMount(name=volume_name, mount_path=mount_path)
101 ]
102
103 if priority is not None:
104 spec.priority_class_name = priority
105
106 pod = client.V1Pod(
107 spec=spec,
108 metadata=client.V1ObjectMeta(
109 name=self.get_worker_pod_name(worker_id),
110 labels={
111 "app": "elasticdl",
112 "elasticdl_job_name": self._job_name,
113 },
114 ),
115 )
116 return pod
117
118 def create_worker(
119 self,
120 worker_id,
121 resource_requests,
122 resource_limits,
123 priority=None,
124 mount_path=None,
125 volume_name=None,
126 image_pull_policy=None,
127 command=None,
128 args=None,
129 restart_policy="OnFailure",
130 ):
131 self._logger.info("Creating worker: " + str(worker_id))
132 pod = self._create_worker_pod(
133 worker_id,
134 resource_requests,
135 resource_limits,
136 priority,
137 mount_path,
138 volume_name,
139 image_pull_policy,
140 command=command,
141 args=args,
142 restart_policy=restart_policy,
143 )
144 return self._v1.create_namespaced_pod(self._ns, pod)
145
146 def delete_worker(self, worker_id):
147 self._logger.info("Deleting worker: " + str(worker_id))
148 self._v1.delete_namespaced_pod(
149 self.get_worker_pod_name(worker_id),
150 self._ns,
151 body=client.V1DeleteOptions(grace_period_seconds=0),
152 )
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py
--- a/elasticdl/python/elasticdl/master/k8s_client.py
+++ b/elasticdl/python/elasticdl/master/k8s_client.py
@@ -103,6 +103,16 @@
if priority is not None:
spec.priority_class_name = priority
+ # Find that master pod that will be used as the owner reference
+ # for this worker pod.
+ pods = self._v1.list_namespaced_pod(
+ namespace=self._ns,
+ label_selector="elasticdl_job_name=" + self._job_name
+ ).items
+ master_pod = [pod for pod in pods if (
+ pod.metadata.name == "elasticdl-master-" + self._job_name
+ )][0]
+
pod = client.V1Pod(
spec=spec,
metadata=client.V1ObjectMeta(
@@ -111,6 +121,17 @@
"app": "elasticdl",
"elasticdl_job_name": self._job_name,
},
+ # TODO: Add tests for this once we've done refactoring on
+ # k8s client code and the constant strings
+ owner_references=[
+ client.V1OwnerReference(
+ api_version="v1",
+ block_owner_deletion=True,
+ kind="Pod",
+ name=master_pod.metadata.name,
+ uid=master_pod.metadata.uid,
+ ),
+ ],
),
)
return pod
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py\n--- a/elasticdl/python/elasticdl/master/k8s_client.py\n+++ b/elasticdl/python/elasticdl/master/k8s_client.py\n@@ -103,6 +103,16 @@\n if priority is not None:\n spec.priority_class_name = priority\n \n+ # Find that master pod that will be used as the owner reference\n+ # for this worker pod.\n+ pods = self._v1.list_namespaced_pod(\n+ namespace=self._ns,\n+ label_selector=\"elasticdl_job_name=\" + self._job_name\n+ ).items\n+ master_pod = [pod for pod in pods if (\n+ pod.metadata.name == \"elasticdl-master-\" + self._job_name\n+ )][0]\n+\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n@@ -111,6 +121,17 @@\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n+ # TODO: Add tests for this once we've done refactoring on\n+ # k8s client code and the constant strings\n+ owner_references=[\n+ client.V1OwnerReference(\n+ api_version=\"v1\",\n+ block_owner_deletion=True,\n+ kind=\"Pod\",\n+ name=master_pod.metadata.name,\n+ uid=master_pod.metadata.uid,\n+ ),\n+ ],\n ),\n )\n return pod\n", "issue": "Create owner references among master and worker pods\nCurrently we have the following two ways to delete master + worker pods:\r\n* Delete each pod one by one\r\n* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`\r\n\r\nIt would be much easier if users could just delete master pod and then worker pods can be delete automatically. This would be possible if there are owner references among master and worker pods.\n", "before_files": [{"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\nfrom kubernetes.client import (\n V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,\n)\n\nWORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(self, *, worker_image, namespace, job_name, event_callback):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command,\n args,\n restart_policy,\n ):\n # Worker container config\n 
container = client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests, limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args,\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container], restart_policy=restart_policy\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name=\"data-volume\",\n persistent_volume_claim=pvcVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False\n ),\n )\n spec.volumes = [volume]\n container.volume_mounts = [\n client.V1VolumeMount(name=volume_name, mount_path=mount_path)\n ]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n ),\n )\n return pod\n\n def create_worker(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n command=None,\n args=None,\n restart_policy=\"OnFailure\",\n ):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command=command,\n args=args,\n restart_policy=restart_policy,\n )\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}], "after_files": [{"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\nfrom kubernetes.client import (\n V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,\n)\n\nWORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(self, *, worker_image, namespace, job_name, event_callback):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(\n 
self,\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command,\n args,\n restart_policy,\n ):\n # Worker container config\n container = client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests, limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args,\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container], restart_policy=restart_policy\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name=\"data-volume\",\n persistent_volume_claim=pvcVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False\n ),\n )\n spec.volumes = [volume]\n container.volume_mounts = [\n client.V1VolumeMount(name=volume_name, mount_path=mount_path)\n ]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n # Find that master pod that will be used as the owner reference\n # for this worker pod.\n pods = self._v1.list_namespaced_pod(\n namespace=self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name\n ).items\n master_pod = [pod for pod in pods if (\n pod.metadata.name == \"elasticdl-master-\" + self._job_name\n )][0]\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n # TODO: Add tests for this once we've done refactoring on\n # k8s client code and the constant strings\n owner_references=[\n client.V1OwnerReference(\n api_version=\"v1\",\n block_owner_deletion=True,\n kind=\"Pod\",\n name=master_pod.metadata.name,\n uid=master_pod.metadata.uid,\n ),\n ],\n ),\n )\n return pod\n\n def create_worker(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n command=None,\n args=None,\n restart_policy=\"OnFailure\",\n ):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command=command,\n args=args,\n restart_policy=restart_policy,\n )\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}]} | 1,683 | 352 |
gh_patches_debug_56983 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused import
As per the comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation.
--- END ISSUE ---
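Concretely, the import the linked review comment refers to (visible near the top of the file below and dropped in the accompanying patch) is:

```python
from opentelemetry.trace.status import Status, StatusCode  # imported but never referenced in this module
```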
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16
17 Usage
18 -----
19
20 The OpenTelemetry ``jinja2`` integration traces templates loading, compilation
21 and rendering.
22
23 Usage
24 -----
25
26 .. code-block:: python
27
28 from jinja2 import Environment, FileSystemLoader
29 from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
30 from opentelemetry import trace
31 from opentelemetry.trace import TracerProvider
32
33 trace.set_tracer_provider(TracerProvider())
34
35 Jinja2Instrumentor().instrument()
36
37 env = Environment(loader=FileSystemLoader("templates"))
38 template = env.get_template("mytemplate.html")
39
40 API
41 ---
42 """
43 # pylint: disable=no-value-for-parameter
44
45 import logging
46
47 import jinja2
48 from wrapt import ObjectProxy
49 from wrapt import wrap_function_wrapper as _wrap
50
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.jinja2.version import __version__
53 from opentelemetry.instrumentation.utils import unwrap
54 from opentelemetry.trace import SpanKind, get_tracer
55 from opentelemetry.trace.status import Status, StatusCode
56
57 logger = logging.getLogger(__name__)
58
59 ATTRIBUTE_JINJA2_TEMPLATE_NAME = "jinja2.template_name"
60 ATTRIBUTE_JINJA2_TEMPLATE_PATH = "jinja2.template_path"
61 DEFAULT_TEMPLATE_NAME = "<memory>"
62
63
64 def _with_tracer_wrapper(func):
65 """Helper for providing tracer for wrapper functions.
66 """
67
68 def _with_tracer(tracer):
69 def wrapper(wrapped, instance, args, kwargs):
70 return func(tracer, wrapped, instance, args, kwargs)
71
72 return wrapper
73
74 return _with_tracer
75
76
77 @_with_tracer_wrapper
78 def _wrap_render(tracer, wrapped, instance, args, kwargs):
79 """Wrap `Template.render()` or `Template.generate()`
80 """
81 with tracer.start_as_current_span(
82 "jinja2.render", kind=SpanKind.INTERNAL,
83 ) as span:
84 if span.is_recording():
85 template_name = instance.name or DEFAULT_TEMPLATE_NAME
86 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
87 return wrapped(*args, **kwargs)
88
89
90 @_with_tracer_wrapper
91 def _wrap_compile(tracer, wrapped, _, args, kwargs):
92 with tracer.start_as_current_span(
93 "jinja2.compile", kind=SpanKind.INTERNAL,
94 ) as span:
95 if span.is_recording():
96 template_name = (
97 args[1]
98 if len(args) > 1
99 else kwargs.get("name", DEFAULT_TEMPLATE_NAME)
100 )
101 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
102 return wrapped(*args, **kwargs)
103
104
105 @_with_tracer_wrapper
106 def _wrap_load_template(tracer, wrapped, _, args, kwargs):
107 with tracer.start_as_current_span(
108 "jinja2.load", kind=SpanKind.INTERNAL,
109 ) as span:
110 if span.is_recording():
111 template_name = kwargs.get("name", args[0])
112 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
113 template = None
114 try:
115 template = wrapped(*args, **kwargs)
116 return template
117 finally:
118 if template and span.is_recording():
119 span.set_attribute(
120 ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename
121 )
122
123
124 class Jinja2Instrumentor(BaseInstrumentor):
125 """An instrumentor for jinja2
126
127 See `BaseInstrumentor`
128 """
129
130 def _instrument(self, **kwargs):
131 tracer_provider = kwargs.get("tracer_provider")
132 tracer = get_tracer(__name__, __version__, tracer_provider)
133
134 _wrap(jinja2, "environment.Template.render", _wrap_render(tracer))
135 _wrap(jinja2, "environment.Template.generate", _wrap_render(tracer))
136 _wrap(jinja2, "environment.Environment.compile", _wrap_compile(tracer))
137 _wrap(
138 jinja2,
139 "environment.Environment._load_template",
140 _wrap_load_template(tracer),
141 )
142
143 def _uninstrument(self, **kwargs):
144 unwrap(jinja2.Template, "render")
145 unwrap(jinja2.Template, "generate")
146 unwrap(jinja2.Environment, "compile")
147 unwrap(jinja2.Environment, "_load_template")
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
@@ -52,7 +52,6 @@
from opentelemetry.instrumentation.jinja2.version import __version__
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer
-from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n@@ -52,7 +52,6 @@\n from opentelemetry.instrumentation.jinja2.version import __version__\n from opentelemetry.instrumentation.utils import unwrap\n from opentelemetry.trace import SpanKind, get_tracer\n-from opentelemetry.trace.status import Status, StatusCode\n \n logger = logging.getLogger(__name__)\n", "issue": "Remove unused import\nAs per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, 
template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": 
"instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}]} | 1,704 | 183 |
gh_patches_debug_27210 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-282 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[master]Use different RPC for reporting gradient and task result.
--- END ISSUE ---
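The issue itself is a one-line request, so purely as a hedged illustration of the intended split (not code taken from the repository), the master servicer would end up exposing two separate handlers rather than funnelling gradients through `ReportTaskResult`:

```python
# Sketch only: method bodies and message details are assumptions, not repo code.
from proto import master_pb2_grpc


class MasterServicer(master_pb2_grpc.MasterServicer):

    def ReportGradient(self, request, context):
        # Gradient path only: fold request.gradient into the running sum
        # and reply with the current model version.
        ...

    def ReportTaskResult(self, request, context):
        # Task bookkeeping only: record success/failure (e.g. request.err_message)
        # without touching gradient accumulation.
        ...
```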
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/master/servicer.py`
Content:
```
1 import threading
2 import numpy as np
3
4 import tensorflow as tf
5 assert tf.executing_eagerly()
6
7 from proto import master_pb2
8 from proto import master_pb2_grpc
9 from util.ndarray import ndarray_to_tensor, tensor_to_ndarray
10
11
12 class MasterServicer(master_pb2_grpc.MasterServicer):
13 """Master service implementation"""
14
15 def __init__(self, logger, grads_to_wait, optimizer):
16 self.logger = logger
17 self._opt = optimizer
18 self._lock = threading.Lock()
19 # TODO: random initialization
20 # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable
21 # instead ndarray to avoid copying and conversion when calling
22 # optimizer's apply_gradients() function.
23 self._model = {}
24 self._version = 0
25 self._gradient_sum = {}
26 self._grad_to_wait = grads_to_wait
27 self._grad_n = 0
28
29 def _set_model_var(self, name, value):
30 """Add or set model variable. Value should be a float32 ndarray"""
31 if value.dtype != np.float32:
32 raise ValueError("Value should be a float32 numpy array")
33 self._model[name] = tf.Variable(value, name=name)
34
35 def GetTask(self, request, context):
36 # TODO: implent task queues. Return an empty task for now.
37 res = master_pb2.Task()
38 res.shard_file_name = ""
39 res.model_version = self._version
40 return res
41
42 def GetModel(self, request, context):
43 if request.min_version > self._version:
44 err_msg = (
45 "Requested version %d not available yet, current version: %d"
46 % (request.min_version, self._version)
47 )
48 self.logger.warning(err_msg)
49 raise ValueError(err_msg)
50
51 res = master_pb2.Model()
52 with self._lock:
53 res.version = self._version
54 for k, v in self._model.items():
55 res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))
56 return res
57
58 def _update_model(self):
59 assert self._lock.locked()
60 grad_var = []
61 for k in self._gradient_sum:
62 self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait
63 grad_var.append((self._gradient_sum[k], self._model[k]))
64 self._opt.apply_gradients(grad_var)
65 self._version += 1
66 self._gradient_sum.clear()
67 self._grad_n = 0
68
69 def ReportTaskResult(self, request, context):
70 if request.model_version > self._version:
71 err_msg = "Model version %d out of range, current version: %d" % (
72 request.model_version,
73 self._version,
74 )
75 self.logger.warning(err_msg)
76 raise ValueError(err_msg)
77
78 res = master_pb2.ReportTaskResultReply()
79 if request.model_version < self._version:
80 self.logger.warning(
81 "Task result for outdated version %d dropped",
82 request.model_version,
83 )
84 res.accepted = False
85 res.model_version = self._version
86 return res
87
88 if request.err_message:
89 self.logger.warning("Worker error: %s" % request.err_message)
90 res.accepted = False
91 res.model_version = self._version
92 return res
93
94 # TODO: Update task queue with task_id
95 with self._lock:
96 tmp = {}
97 # Do sanity check before accumulating gradients.
98 for k, v in request.gradient.items():
99 if k not in self._model:
100 raise ValueError(
101 "Gradient key: %s is not part of model", k
102 )
103 arr = tensor_to_ndarray(v)
104 if arr.shape != self._model[k].numpy().shape:
105 raise ValueError(
106 "Gradient key: %s has incompatible dimension", k
107 )
108 tmp[k] = arr
109
110 for k, v in tmp.items():
111 if k in self._gradient_sum:
112 self._gradient_sum[k] = self._gradient_sum[k] + v
113 else:
114 self._gradient_sum[k] = v
115
116 self._grad_n += 1
117 if self._grad_n >= self._grad_to_wait:
118 self._update_model()
119 res.accepted = True
120 res.model_version = self._version
121 return res
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py
--- a/elasticdl/master/servicer.py
+++ b/elasticdl/master/servicer.py
@@ -66,7 +66,7 @@
self._gradient_sum.clear()
self._grad_n = 0
- def ReportTaskResult(self, request, context):
+ def ReportGradient(self, request, context):
if request.model_version > self._version:
err_msg = "Model version %d out of range, current version: %d" % (
request.model_version,
@@ -75,7 +75,7 @@
self.logger.warning(err_msg)
raise ValueError(err_msg)
- res = master_pb2.ReportTaskResultReply()
+ res = master_pb2.ReportGradientReply()
if request.model_version < self._version:
self.logger.warning(
"Task result for outdated version %d dropped",
@@ -85,12 +85,6 @@
res.model_version = self._version
return res
- if request.err_message:
- self.logger.warning("Worker error: %s" % request.err_message)
- res.accepted = False
- res.model_version = self._version
- return res
-
# TODO: Update task queue with task_id
with self._lock:
tmp = {}
| {"golden_diff": "diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py\n--- a/elasticdl/master/servicer.py\n+++ b/elasticdl/master/servicer.py\n@@ -66,7 +66,7 @@\n self._gradient_sum.clear()\n self._grad_n = 0\n \n- def ReportTaskResult(self, request, context):\n+ def ReportGradient(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n@@ -75,7 +75,7 @@\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n \n- res = master_pb2.ReportTaskResultReply()\n+ res = master_pb2.ReportGradientReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n@@ -85,12 +85,6 @@\n res.model_version = self._version\n return res\n \n- if request.err_message:\n- self.logger.warning(\"Worker error: %s\" % request.err_message)\n- res.accepted = False\n- res.model_version = self._version\n- return res\n-\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n", "issue": "[master]Use different RPC for reporting gradient and task result.\n\n", "before_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.ndarray import ndarray_to_tensor, tensor_to_ndarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait, optimizer):\n self.logger = logger\n self._opt = optimizer\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))\n return res\n\n def _update_model(self):\n assert self._lock.locked()\n grad_var = []\n for k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait\n grad_var.append((self._gradient_sum[k], self._model[k]))\n self._opt.apply_gradients(grad_var)\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = tensor_to_ndarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n self._update_model()\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}], "after_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.ndarray import ndarray_to_tensor, tensor_to_ndarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait, optimizer):\n self.logger = logger\n self._opt = optimizer\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. 
Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))\n return res\n\n def _update_model(self):\n assert self._lock.locked()\n grad_var = []\n for k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait\n grad_var.append((self._gradient_sum[k], self._model[k]))\n self._opt.apply_gradients(grad_var)\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n\n def ReportGradient(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportGradientReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = tensor_to_ndarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n self._update_model()\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}]} | 1,467 | 301 |
gh_patches_debug_61519 | rasdani/github-patches | git_diff | open-mmlab__mmpose-1906 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
little config error in 1.x
mmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
MobileNetV2's out_channels is 1280; however, the head's `in_channels` is 2048 in this config file.
--- END ISSUE ---
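Concretely, MobileNetV2 at `widen_factor=1.0` ends in a 1280-channel feature map, so the head has to consume 1280 channels rather than 2048. A corrected head block would presumably look like the following (all other values copied from the config quoted below; `codec` is the codec dict defined in that same config):

```python
# Only in_channels changes: it must match the backbone's final channel count.
head = dict(
    type='HeatmapHead',
    in_channels=1280,   # MobileNetV2 output width, not 2048
    out_channels=21,
    loss=dict(type='KeypointMSELoss', use_target_weight=True),
    decoder=codec,
)
```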
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py`
Content:
```
1 _base_ = ['../../../_base_/default_runtime.py']
2
3 # runtime
4 train_cfg = dict(max_epochs=210, val_interval=10)
5
6 # optimizer
7 optim_wrapper = dict(optimizer=dict(
8 type='Adam',
9 lr=5e-4,
10 ))
11
12 # learning policy
13 param_scheduler = [
14 dict(
15 type='LinearLR', begin=0, end=500, start_factor=0.001,
16 by_epoch=False), # warm-up
17 dict(
18 type='MultiStepLR',
19 begin=0,
20 end=210,
21 milestones=[170, 200],
22 gamma=0.1,
23 by_epoch=True)
24 ]
25
26 # automatically scaling LR based on the actual training batch size
27 auto_scale_lr = dict(base_batch_size=256)
28
29 # hooks
30 default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))
31 # codec settings
32 codec = dict(
33 type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
34
35 # model settings
36 model = dict(
37 type='TopdownPoseEstimator',
38 data_preprocessor=dict(
39 type='PoseDataPreprocessor',
40 mean=[123.675, 116.28, 103.53],
41 std=[58.395, 57.12, 57.375],
42 bgr_to_rgb=True),
43 backbone=dict(
44 type='MobileNetV2',
45 widen_factor=1.,
46 out_indices=(7, ),
47 init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
48 head=dict(
49 type='HeatmapHead',
50 in_channels=2048,
51 out_channels=21,
52 loss=dict(type='KeypointMSELoss', use_target_weight=True),
53 decoder=codec),
54 test_cfg=dict(
55 flip_test=True,
56 flip_mode='heatmap',
57 shift_heatmap=True,
58 ))
59
60 # base dataset settings
61 dataset_type = 'CocoWholeBodyHandDataset'
62 data_mode = 'topdown'
63 data_root = 'data/coco/'
64
65 # pipelines
66 train_pipeline = [
67 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
68 dict(type='GetBBoxCenterScale'),
69 dict(
70 type='RandomBBoxTransform', rotate_factor=180,
71 scale_factor=(0.7, 1.3)),
72 dict(type='RandomFlip', direction='horizontal'),
73 dict(type='TopdownAffine', input_size=codec['input_size']),
74 dict(type='GenerateTarget', encoder=codec),
75 dict(type='PackPoseInputs')
76 ]
77 val_pipeline = [
78 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
79 dict(type='GetBBoxCenterScale'),
80 dict(type='TopdownAffine', input_size=codec['input_size']),
81 dict(type='PackPoseInputs')
82 ]
83
84 # data loaders
85 train_dataloader = dict(
86 batch_size=32,
87 num_workers=2,
88 persistent_workers=True,
89 sampler=dict(type='DefaultSampler', shuffle=True),
90 dataset=dict(
91 type=dataset_type,
92 data_root=data_root,
93 data_mode=data_mode,
94 ann_file='annotations/coco_wholebody_train_v1.0.json',
95 data_prefix=dict(img='train2017/'),
96 pipeline=train_pipeline,
97 ))
98 val_dataloader = dict(
99 batch_size=32,
100 num_workers=2,
101 persistent_workers=True,
102 drop_last=False,
103 sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
104 dataset=dict(
105 type=dataset_type,
106 data_root=data_root,
107 data_mode=data_mode,
108 ann_file='annotations/coco_wholebody_val_v1.0.json',
109 data_prefix=dict(img='val2017/'),
110 test_mode=True,
111 pipeline=val_pipeline,
112 ))
113 test_dataloader = val_dataloader
114
115 val_evaluator = [
116 dict(type='PCKAccuracy', thr=0.2),
117 dict(type='AUC'),
118 dict(type='EPE')
119 ]
120 test_evaluator = val_evaluator
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
@@ -47,7 +47,7 @@
init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
head=dict(
type='HeatmapHead',
- in_channels=2048,
+ in_channels=1280,
out_channels=21,
loss=dict(type='KeypointMSELoss', use_target_weight=True),
decoder=codec),
| {"golden_diff": "diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n@@ -47,7 +47,7 @@\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n- in_channels=2048,\n+ in_channels=1280,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n", "issue": "little config error in 1.x\n\r\nmmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand)/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\r\n\r\nmobilenetv2 out_channels is 1280, however \"in_channles\" of the head is 2048 in this config file. \r\n\n", "before_files": [{"content": "_base_ = ['../../../_base_/default_runtime.py']\n\n# runtime\ntrain_cfg = dict(max_epochs=210, val_interval=10)\n\n# optimizer\noptim_wrapper = dict(optimizer=dict(\n type='Adam',\n lr=5e-4,\n))\n\n# learning policy\nparam_scheduler = [\n dict(\n type='LinearLR', begin=0, end=500, start_factor=0.001,\n by_epoch=False), # warm-up\n dict(\n type='MultiStepLR',\n begin=0,\n end=210,\n milestones=[170, 200],\n gamma=0.1,\n by_epoch=True)\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=256)\n\n# hooks\ndefault_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))\n# codec settings\ncodec = dict(\n type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n type='MobileNetV2',\n widen_factor=1.,\n out_indices=(7, ),\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n in_channels=2048,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n test_cfg=dict(\n flip_test=True,\n flip_mode='heatmap',\n shift_heatmap=True,\n ))\n\n# base dataset settings\ndataset_type = 'CocoWholeBodyHandDataset'\ndata_mode = 'topdown'\ndata_root = 'data/coco/'\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(\n type='RandomBBoxTransform', rotate_factor=180,\n scale_factor=(0.7, 1.3)),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='GenerateTarget', encoder=codec),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_train_v1.0.json',\n 
data_prefix=dict(img='train2017/'),\n pipeline=train_pipeline,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='val2017/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n dict(type='PCKAccuracy', thr=0.2),\n dict(type='AUC'),\n dict(type='EPE')\n]\ntest_evaluator = val_evaluator\n", "path": "configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py"}], "after_files": [{"content": "_base_ = ['../../../_base_/default_runtime.py']\n\n# runtime\ntrain_cfg = dict(max_epochs=210, val_interval=10)\n\n# optimizer\noptim_wrapper = dict(optimizer=dict(\n type='Adam',\n lr=5e-4,\n))\n\n# learning policy\nparam_scheduler = [\n dict(\n type='LinearLR', begin=0, end=500, start_factor=0.001,\n by_epoch=False), # warm-up\n dict(\n type='MultiStepLR',\n begin=0,\n end=210,\n milestones=[170, 200],\n gamma=0.1,\n by_epoch=True)\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=256)\n\n# hooks\ndefault_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))\n# codec settings\ncodec = dict(\n type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n type='MobileNetV2',\n widen_factor=1.,\n out_indices=(7, ),\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n in_channels=1280,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n test_cfg=dict(\n flip_test=True,\n flip_mode='heatmap',\n shift_heatmap=True,\n ))\n\n# base dataset settings\ndataset_type = 'CocoWholeBodyHandDataset'\ndata_mode = 'topdown'\ndata_root = 'data/coco/'\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(\n type='RandomBBoxTransform', rotate_factor=180,\n scale_factor=(0.7, 1.3)),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='GenerateTarget', encoder=codec),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_train_v1.0.json',\n data_prefix=dict(img='train2017/'),\n pipeline=train_pipeline,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n 
ann_file='annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='val2017/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n dict(type='PCKAccuracy', thr=0.2),\n dict(type='AUC'),\n dict(type='EPE')\n]\ntest_evaluator = val_evaluator\n", "path": "configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py"}]} | 1,585 | 316 |
gh_patches_debug_20002 | rasdani/github-patches | git_diff | scrapy__scrapy-2510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disk queues don't preserve Request class
When a Request subclass (e.g. FormRequest) is sent to a disk queue a bare Request is what you get back.
This is inconvenient for scrapy-splash: Splash requests all have Splash URL as request.url, but for logging it is nice to display the requested URL, not only Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only when request is kept in memory.
--- END ISSUE ---
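To make the reported behaviour concrete, here is a small sketch of the round trip using the helpers from `scrapy/utils/reqser.py` shown below (the URL and form data are arbitrary placeholders):

```python
from scrapy.http import FormRequest
from scrapy.utils.reqser import request_to_dict, request_from_dict

# A FormRequest is flattened to a plain dict on its way into the disk queue...
req = FormRequest('http://example.com/login', formdata={'user': 'x'})
d = request_to_dict(req)          # nothing in `d` records the subclass

# ...and is rebuilt as a bare Request, so anything the subclass adds
# (such as scrapy-splash's custom __repr__) is lost after dequeueing.
restored = request_from_dict(d)
print(type(req).__name__, '->', type(restored).__name__)  # FormRequest -> Request
```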
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/reqser.py`
Content:
```
1 """
2 Helper functions for serializing (and deserializing) requests.
3 """
4 import six
5
6 from scrapy.http import Request
7 from scrapy.utils.python import to_unicode, to_native_str
8
9
10 def request_to_dict(request, spider=None):
11 """Convert Request object to a dict.
12
13 If a spider is given, it will try to find out the name of the spider method
14 used in the callback and store that as the callback.
15 """
16 cb = request.callback
17 if callable(cb):
18 cb = _find_method(spider, cb)
19 eb = request.errback
20 if callable(eb):
21 eb = _find_method(spider, eb)
22 d = {
23 'url': to_unicode(request.url), # urls should be safe (safe_string_url)
24 'callback': cb,
25 'errback': eb,
26 'method': request.method,
27 'headers': dict(request.headers),
28 'body': request.body,
29 'cookies': request.cookies,
30 'meta': request.meta,
31 '_encoding': request._encoding,
32 'priority': request.priority,
33 'dont_filter': request.dont_filter,
34 }
35 return d
36
37
38 def request_from_dict(d, spider=None):
39 """Create Request object from a dict.
40
41 If a spider is given, it will try to resolve the callbacks looking at the
42 spider for methods with the same name.
43 """
44 cb = d['callback']
45 if cb and spider:
46 cb = _get_method(spider, cb)
47 eb = d['errback']
48 if eb and spider:
49 eb = _get_method(spider, eb)
50 return Request(
51 url=to_native_str(d['url']),
52 callback=cb,
53 errback=eb,
54 method=d['method'],
55 headers=d['headers'],
56 body=d['body'],
57 cookies=d['cookies'],
58 meta=d['meta'],
59 encoding=d['_encoding'],
60 priority=d['priority'],
61 dont_filter=d['dont_filter'])
62
63
64 def _find_method(obj, func):
65 if obj:
66 try:
67 func_self = six.get_method_self(func)
68 except AttributeError: # func has no __self__
69 pass
70 else:
71 if func_self is obj:
72 return six.get_method_function(func).__name__
73 raise ValueError("Function %s is not a method of: %s" % (func, obj))
74
75
76 def _get_method(obj, name):
77 name = str(name)
78 try:
79 return getattr(obj, name)
80 except AttributeError:
81 raise ValueError("Method %r not found in: %s" % (name, obj))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py
--- a/scrapy/utils/reqser.py
+++ b/scrapy/utils/reqser.py
@@ -5,6 +5,7 @@
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
+from scrapy.utils.misc import load_object
def request_to_dict(request, spider=None):
@@ -32,6 +33,8 @@
'priority': request.priority,
'dont_filter': request.dont_filter,
}
+ if type(request) is not Request:
+ d['_class'] = request.__module__ + '.' + request.__class__.__name__
return d
@@ -47,7 +50,8 @@
eb = d['errback']
if eb and spider:
eb = _get_method(spider, eb)
- return Request(
+ request_cls = load_object(d['_class']) if '_class' in d else Request
+ return request_cls(
url=to_native_str(d['url']),
callback=cb,
errback=eb,
| {"golden_diff": "diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py\n--- a/scrapy/utils/reqser.py\n+++ b/scrapy/utils/reqser.py\n@@ -5,6 +5,7 @@\n \n from scrapy.http import Request\n from scrapy.utils.python import to_unicode, to_native_str\n+from scrapy.utils.misc import load_object\n \n \n def request_to_dict(request, spider=None):\n@@ -32,6 +33,8 @@\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n+ if type(request) is not Request:\n+ d['_class'] = request.__module__ + '.' + request.__class__.__name__\n return d\n \n \n@@ -47,7 +50,8 @@\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n- return Request(\n+ request_cls = load_object(d['_class']) if '_class' in d else Request\n+ return request_cls(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n", "issue": "Disk queues don't preserve Request class\nWhen a Request subclass (e.g. FormRequest) is sent to a disk queue a bare Request is what you get back. \n\nThis is inconvenient for scrapy-splash: Splash requests all have Splash URL as request.url, but for logging it is nice to display the requested URL, not only Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only when request is kept in memory.\n\n", "before_files": [{"content": "\"\"\"\nHelper functions for serializing (and deserializing) requests.\n\"\"\"\nimport six\n\nfrom scrapy.http import Request\nfrom scrapy.utils.python import to_unicode, to_native_str\n\n\ndef request_to_dict(request, spider=None):\n \"\"\"Convert Request object to a dict.\n\n If a spider is given, it will try to find out the name of the spider method\n used in the callback and store that as the callback.\n \"\"\"\n cb = request.callback\n if callable(cb):\n cb = _find_method(spider, cb)\n eb = request.errback\n if callable(eb):\n eb = _find_method(spider, eb)\n d = {\n 'url': to_unicode(request.url), # urls should be safe (safe_string_url)\n 'callback': cb,\n 'errback': eb,\n 'method': request.method,\n 'headers': dict(request.headers),\n 'body': request.body,\n 'cookies': request.cookies,\n 'meta': request.meta,\n '_encoding': request._encoding,\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n return d\n\n\ndef request_from_dict(d, spider=None):\n \"\"\"Create Request object from a dict.\n\n If a spider is given, it will try to resolve the callbacks looking at the\n spider for methods with the same name.\n \"\"\"\n cb = d['callback']\n if cb and spider:\n cb = _get_method(spider, cb)\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n return Request(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n method=d['method'],\n headers=d['headers'],\n body=d['body'],\n cookies=d['cookies'],\n meta=d['meta'],\n encoding=d['_encoding'],\n priority=d['priority'],\n dont_filter=d['dont_filter'])\n\n\ndef _find_method(obj, func):\n if obj:\n try:\n func_self = six.get_method_self(func)\n except AttributeError: # func has no __self__\n pass\n else:\n if func_self is obj:\n return six.get_method_function(func).__name__\n raise ValueError(\"Function %s is not a method of: %s\" % (func, obj))\n\n\ndef _get_method(obj, name):\n name = str(name)\n try:\n return getattr(obj, name)\n except AttributeError:\n raise ValueError(\"Method %r not found in: %s\" % (name, obj))\n", "path": "scrapy/utils/reqser.py"}], "after_files": [{"content": "\"\"\"\nHelper functions for serializing (and deserializing) requests.\n\"\"\"\nimport six\n\nfrom 
scrapy.http import Request\nfrom scrapy.utils.python import to_unicode, to_native_str\nfrom scrapy.utils.misc import load_object\n\n\ndef request_to_dict(request, spider=None):\n \"\"\"Convert Request object to a dict.\n\n If a spider is given, it will try to find out the name of the spider method\n used in the callback and store that as the callback.\n \"\"\"\n cb = request.callback\n if callable(cb):\n cb = _find_method(spider, cb)\n eb = request.errback\n if callable(eb):\n eb = _find_method(spider, eb)\n d = {\n 'url': to_unicode(request.url), # urls should be safe (safe_string_url)\n 'callback': cb,\n 'errback': eb,\n 'method': request.method,\n 'headers': dict(request.headers),\n 'body': request.body,\n 'cookies': request.cookies,\n 'meta': request.meta,\n '_encoding': request._encoding,\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n if type(request) is not Request:\n d['_class'] = request.__module__ + '.' + request.__class__.__name__\n return d\n\n\ndef request_from_dict(d, spider=None):\n \"\"\"Create Request object from a dict.\n\n If a spider is given, it will try to resolve the callbacks looking at the\n spider for methods with the same name.\n \"\"\"\n cb = d['callback']\n if cb and spider:\n cb = _get_method(spider, cb)\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n request_cls = load_object(d['_class']) if '_class' in d else Request\n return request_cls(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n method=d['method'],\n headers=d['headers'],\n body=d['body'],\n cookies=d['cookies'],\n meta=d['meta'],\n encoding=d['_encoding'],\n priority=d['priority'],\n dont_filter=d['dont_filter'])\n\n\ndef _find_method(obj, func):\n if obj:\n try:\n func_self = six.get_method_self(func)\n except AttributeError: # func has no __self__\n pass\n else:\n if func_self is obj:\n return six.get_method_function(func).__name__\n raise ValueError(\"Function %s is not a method of: %s\" % (func, obj))\n\n\ndef _get_method(obj, name):\n name = str(name)\n try:\n return getattr(obj, name)\n except AttributeError:\n raise ValueError(\"Method %r not found in: %s\" % (name, obj))\n", "path": "scrapy/utils/reqser.py"}]} | 1,066 | 243 |
gh_patches_debug_16259 | rasdani/github-patches | git_diff | tensorflow__addons-270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update sparsemax to use tf.where V2
As described in #250 and temporarily patched in #251 sparsemax has one instance of tf.where that needs the broadcasting dimensions changed to match numpy and TF2 style.
--- END ISSUE ---
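The broadcasting difference is the whole story here: `tf.compat.v1.where` accepted a rank-1 condition that selected entire rows, whereas TF2's `tf.where` broadcasts the condition elementwise, so the row mask needs an explicit trailing axis. A standalone sketch (`invalid_rows` is a stand-in name for the `k_z == 0` / NaN check in the code below):

```python
import tensorflow as tf

obs, dims = 2, 3
p = tf.zeros([obs, dims])
nan_fill = tf.fill([obs, dims], float("nan"))
invalid_rows = tf.constant([True, False])  # shape [obs]

# TF1 style: a rank-1 condition picked whole rows.
# p_safe = tf.compat.v1.where(invalid_rows, nan_fill, p)

# TF2 / NumPy style: expand the mask so it broadcasts across the last axis.
p_safe = tf.where(tf.expand_dims(invalid_rows, -1), nan_fill, p)
```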
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/activations/sparsemax.py`
Content:
```
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import tensorflow as tf
21
22 from tensorflow_addons.utils import keras_utils
23
24
25 @tf.function
26 @keras_utils.register_keras_custom_object
27 def sparsemax(logits, axis=-1, name=None):
28 """Sparsemax activation function [1].
29
30 For each batch `i` and class `j` we have
31 $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
32
33 [1]: https://arxiv.org/abs/1602.02068
34
35 Args:
36 logits: Input tensor.
37 axis: Integer, axis along which the sparsemax operation is applied.
38 name: A name for the operation (optional).
39 Returns:
40 Tensor, output of sparsemax transformation. Has the same type and
41 shape as `logits`.
42 Raises:
43 ValueError: In case `dim(logits) == 1`.
44 """
45 logits = tf.convert_to_tensor(logits, name="logits")
46
47 # We need its original shape for shape inference.
48 shape = logits.get_shape()
49 rank = shape.rank
50 is_last_axis = (axis == -1) or (axis == rank - 1)
51
52 if is_last_axis:
53 output = _compute_2d_sparsemax(logits, name=name)
54 output.set_shape(shape)
55 return output
56
57 # If dim is not the last dimension, we have to do a transpose so that we can
58 # still perform softmax on its last dimension.
59
60 # Swap logits' dimension of dim and its last dimension.
61 rank_op = tf.rank(logits)
62 axis_norm = axis % rank
63 logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))
64
65 # Do the actual softmax on its last dimension.
66 output = _compute_2d_sparsemax(logits)
67 output = _swap_axis(
68 output, axis_norm, tf.math.subtract(rank_op, 1), name=name)
69
70 # Make shape inference work since transpose may erase its static shape.
71 output.set_shape(shape)
72 return output
73
74
75 def _swap_axis(logits, dim_index, last_index, **kwargs):
76 return tf.transpose(
77 logits,
78 tf.concat([
79 tf.range(dim_index), [last_index],
80 tf.range(dim_index + 1, last_index), [dim_index]
81 ], 0), **kwargs)
82
83
84 @tf.function
85 def _compute_2d_sparsemax(logits, name=None):
86 """Performs the sparsemax operation when axis=-1."""
87 shape_op = tf.shape(logits)
88 obs = tf.math.reduce_prod(shape_op[:-1])
89 dims = shape_op[-1]
90
91 # In the paper, they call the logits z.
92 # The mean(logits) can be substracted from logits to make the algorithm
93 # more numerically stable. the instability in this algorithm comes mostly
94 # from the z_cumsum. Substacting the mean will cause z_cumsum to be close
95 # to zero. However, in practise the numerical instability issues are very
96 # minor and substacting the mean causes extra issues with inf and nan
97 # input.
98 # Reshape to [obs, dims] as it is almost free and means the remanining
99 # code doesn't need to worry about the rank.
100 z = tf.reshape(logits, [obs, dims])
101
102 # sort z
103 z_sorted, _ = tf.nn.top_k(z, k=dims)
104
105 # calculate k(z)
106 z_cumsum = tf.math.cumsum(z_sorted, axis=-1)
107 k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
108 z_check = 1 + k * z_sorted > z_cumsum
109 # because the z_check vector is always [1,1,...1,0,0,...0] finding the
110 # (index + 1) of the last `1` is the same as just summing the number of 1.
111 k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)
112
113 # calculate tau(z)
114 # If there are inf values or all values are -inf, the k_z will be zero,
115 # this is mathematically invalid and will also cause the gather_nd to fail.
116 # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then
117 # fixed later (see p_safe) by returning p = nan. This results in the same
118 # behavior as softmax.
119 k_z_safe = tf.math.maximum(k_z, 1)
120 indices = tf.stack(
121 [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)
122 tau_sum = tf.gather_nd(z_cumsum, indices)
123 tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)
124
125 # calculate p
126 p = tf.math.maximum(
127 tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))
128 # If k_z = 0 or if z = nan, then the input is invalid
129 # TODO: Adjust dimension order for TF2 broadcasting
130 p_safe = tf.compat.v1.where(
131 tf.math.logical_or(
132 tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
133 tf.fill([obs, dims], tf.cast(float("nan"), logits.dtype)), p)
134
135 # Reshape back to original size
136 p_safe = tf.reshape(p_safe, shape_op, name=name)
137 return p_safe
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/activations/sparsemax.py b/tensorflow_addons/activations/sparsemax.py
--- a/tensorflow_addons/activations/sparsemax.py
+++ b/tensorflow_addons/activations/sparsemax.py
@@ -126,11 +126,12 @@
p = tf.math.maximum(
tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))
# If k_z = 0 or if z = nan, then the input is invalid
- # TODO: Adjust dimension order for TF2 broadcasting
- p_safe = tf.compat.v1.where(
- tf.math.logical_or(
- tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
- tf.fill([obs, dims], tf.cast(float("nan"), logits.dtype)), p)
+ p_safe = tf.where(
+ tf.expand_dims(
+ tf.math.logical_or(
+ tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
+ axis=-1), tf.fill([obs, dims], tf.cast(float("nan"),
+ logits.dtype)), p)
# Reshape back to original size
p_safe = tf.reshape(p_safe, shape_op, name=name)
| {"golden_diff": "diff --git a/tensorflow_addons/activations/sparsemax.py b/tensorflow_addons/activations/sparsemax.py\n--- a/tensorflow_addons/activations/sparsemax.py\n+++ b/tensorflow_addons/activations/sparsemax.py\n@@ -126,11 +126,12 @@\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n- # TODO: Adjust dimension order for TF2 broadcasting\n- p_safe = tf.compat.v1.where(\n- tf.math.logical_or(\n- tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n- tf.fill([obs, dims], tf.cast(float(\"nan\"), logits.dtype)), p)\n+ p_safe = tf.where(\n+ tf.expand_dims(\n+ tf.math.logical_or(\n+ tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n+ axis=-1), tf.fill([obs, dims], tf.cast(float(\"nan\"),\n+ logits.dtype)), p)\n \n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n", "issue": "Update sparsemax to use tf.where V2\nAs described in #250 and temporarily patched in #251 sparsemax has one instance of tf.where that needs the broadcasting dimensions changed to match numpy and TF2 style.\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils import keras_utils\n\n\[email protected]\n@keras_utils.register_keras_custom_object\ndef sparsemax(logits, axis=-1, name=None):\n \"\"\"Sparsemax activation function [1].\n\n For each batch `i` and class `j` we have\n $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$\n\n [1]: https://arxiv.org/abs/1602.02068\n\n Args:\n logits: Input tensor.\n axis: Integer, axis along which the sparsemax operation is applied.\n name: A name for the operation (optional).\n Returns:\n Tensor, output of sparsemax transformation. 
Has the same type and\n shape as `logits`.\n Raises:\n ValueError: In case `dim(logits) == 1`.\n \"\"\"\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n\n # We need its original shape for shape inference.\n shape = logits.get_shape()\n rank = shape.rank\n is_last_axis = (axis == -1) or (axis == rank - 1)\n\n if is_last_axis:\n output = _compute_2d_sparsemax(logits, name=name)\n output.set_shape(shape)\n return output\n\n # If dim is not the last dimension, we have to do a transpose so that we can\n # still perform softmax on its last dimension.\n\n # Swap logits' dimension of dim and its last dimension.\n rank_op = tf.rank(logits)\n axis_norm = axis % rank\n logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))\n\n # Do the actual softmax on its last dimension.\n output = _compute_2d_sparsemax(logits)\n output = _swap_axis(\n output, axis_norm, tf.math.subtract(rank_op, 1), name=name)\n\n # Make shape inference work since transpose may erase its static shape.\n output.set_shape(shape)\n return output\n\n\ndef _swap_axis(logits, dim_index, last_index, **kwargs):\n return tf.transpose(\n logits,\n tf.concat([\n tf.range(dim_index), [last_index],\n tf.range(dim_index + 1, last_index), [dim_index]\n ], 0), **kwargs)\n\n\[email protected]\ndef _compute_2d_sparsemax(logits, name=None):\n \"\"\"Performs the sparsemax operation when axis=-1.\"\"\"\n shape_op = tf.shape(logits)\n obs = tf.math.reduce_prod(shape_op[:-1])\n dims = shape_op[-1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n # Reshape to [obs, dims] as it is almost free and means the remanining\n # code doesn't need to worry about the rank.\n z = tf.reshape(logits, [obs, dims])\n\n # sort z\n z_sorted, _ = tf.nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = tf.math.cumsum(z_sorted, axis=-1)\n k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. 
This results in the same\n # behavior as softmax.\n k_z_safe = tf.math.maximum(k_z, 1)\n indices = tf.stack(\n [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)\n tau_sum = tf.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)\n\n # calculate p\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n # TODO: Adjust dimension order for TF2 broadcasting\n p_safe = tf.compat.v1.where(\n tf.math.logical_or(\n tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n tf.fill([obs, dims], tf.cast(float(\"nan\"), logits.dtype)), p)\n\n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n return p_safe\n", "path": "tensorflow_addons/activations/sparsemax.py"}], "after_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils import keras_utils\n\n\[email protected]\n@keras_utils.register_keras_custom_object\ndef sparsemax(logits, axis=-1, name=None):\n \"\"\"Sparsemax activation function [1].\n\n For each batch `i` and class `j` we have\n $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$\n\n [1]: https://arxiv.org/abs/1602.02068\n\n Args:\n logits: Input tensor.\n axis: Integer, axis along which the sparsemax operation is applied.\n name: A name for the operation (optional).\n Returns:\n Tensor, output of sparsemax transformation. 
Has the same type and\n shape as `logits`.\n Raises:\n ValueError: In case `dim(logits) == 1`.\n \"\"\"\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n\n # We need its original shape for shape inference.\n shape = logits.get_shape()\n rank = shape.rank\n is_last_axis = (axis == -1) or (axis == rank - 1)\n\n if is_last_axis:\n output = _compute_2d_sparsemax(logits, name=name)\n output.set_shape(shape)\n return output\n\n # If dim is not the last dimension, we have to do a transpose so that we can\n # still perform softmax on its last dimension.\n\n # Swap logits' dimension of dim and its last dimension.\n rank_op = tf.rank(logits)\n axis_norm = axis % rank\n logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))\n\n # Do the actual softmax on its last dimension.\n output = _compute_2d_sparsemax(logits)\n output = _swap_axis(\n output, axis_norm, tf.math.subtract(rank_op, 1), name=name)\n\n # Make shape inference work since transpose may erase its static shape.\n output.set_shape(shape)\n return output\n\n\ndef _swap_axis(logits, dim_index, last_index, **kwargs):\n return tf.transpose(\n logits,\n tf.concat([\n tf.range(dim_index), [last_index],\n tf.range(dim_index + 1, last_index), [dim_index]\n ], 0), **kwargs)\n\n\[email protected]\ndef _compute_2d_sparsemax(logits, name=None):\n \"\"\"Performs the sparsemax operation when axis=-1.\"\"\"\n shape_op = tf.shape(logits)\n obs = tf.math.reduce_prod(shape_op[:-1])\n dims = shape_op[-1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n # Reshape to [obs, dims] as it is almost free and means the remanining\n # code doesn't need to worry about the rank.\n z = tf.reshape(logits, [obs, dims])\n\n # sort z\n z_sorted, _ = tf.nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = tf.math.cumsum(z_sorted, axis=-1)\n k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. 
This results in the same\n # behavior as softmax.\n k_z_safe = tf.math.maximum(k_z, 1)\n indices = tf.stack(\n [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)\n tau_sum = tf.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)\n\n # calculate p\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n p_safe = tf.where(\n tf.expand_dims(\n tf.math.logical_or(\n tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n axis=-1), tf.fill([obs, dims], tf.cast(float(\"nan\"),\n logits.dtype)), p)\n\n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n return p_safe\n", "path": "tensorflow_addons/activations/sparsemax.py"}]} | 1,988 | 291 |
gh_patches_debug_7433 | rasdani/github-patches | git_diff | SciTools__cartopy-439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Geostationary example bug
```
python cartopy/docs/source/examples/geostationary.py
Traceback (most recent call last):
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 60, in <module>
main()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 54, in main
img, crs, extent, origin = geos_image()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 43, in geos_image
img_handle = BytesIO(urllib2.urlopen(url).read())
NameError: global name 'urllib2' is not defined
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/cartopy/examples/geostationary.py`
Content:
```
1 """
2 Reprojecting images from a Geostationary projection
3 ---------------------------------------------------
4
5 This example demonstrates Cartopy's ability to project images into the desired
6 projection on-the-fly. The image itself is retrieved from a URL and is loaded
7 directly into memory without storing it intermediately into a file. It
8 represents pre-processed data from Moderate-Resolution Imaging
9 Spectroradiometer (MODIS) which has been put into an image in the data's
10 native Geostationary coordinate system - it is then projected by cartopy
11 into a global Miller map.
12
13 """
14 __tags__ = ["Scalar data"]
15 try:
16 from urllib2 import urlopen
17 except ImportError:
18 from urllib.request import urlopen
19 from io import BytesIO
20
21 import cartopy.crs as ccrs
22 import matplotlib.pyplot as plt
23
24
25 def geos_image():
26 """
27 Return a specific MODIS image by retrieving it from a github gist URL.
28
29 Returns
30 -------
31 img : numpy array
32 The pixels of the image in a numpy array.
33 img_proj : cartopy CRS
34 The rectangular coordinate system of the image.
35 img_extent : tuple of floats
36 The extent of the image ``(x0, y0, x1, y1)`` referenced in
37 the ``img_proj`` coordinate system.
38 origin : str
39 The origin of the image to be passed through to matplotlib's imshow.
40
41 """
42 url = ('https://gist.github.com/pelson/5871263/raw/'
43 'EIDA50_201211061300_clip2.png')
44 img_handle = BytesIO(urllib2.urlopen(url).read())
45 img = plt.imread(img_handle)
46 img_proj = ccrs.Geostationary(satellite_height=35786000)
47 img_extent = (-5500000, 5500000, -5500000, 5500000)
48 return img, img_proj, img_extent, 'upper'
49
50
51 def main():
52 ax = plt.axes(projection=ccrs.Miller())
53 ax.coastlines()
54 ax.set_global()
55 img, crs, extent, origin = geos_image()
56 plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')
57 plt.show()
58
59
60 if __name__ == '__main__':
61 main()
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py
--- a/lib/cartopy/examples/geostationary.py
+++ b/lib/cartopy/examples/geostationary.py
@@ -41,7 +41,7 @@
"""
url = ('https://gist.github.com/pelson/5871263/raw/'
'EIDA50_201211061300_clip2.png')
- img_handle = BytesIO(urllib2.urlopen(url).read())
+ img_handle = BytesIO(urlopen(url).read())
img = plt.imread(img_handle)
img_proj = ccrs.Geostationary(satellite_height=35786000)
img_extent = (-5500000, 5500000, -5500000, 5500000)
| {"golden_diff": "diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py\n--- a/lib/cartopy/examples/geostationary.py\n+++ b/lib/cartopy/examples/geostationary.py\n@@ -41,7 +41,7 @@\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n- img_handle = BytesIO(urllib2.urlopen(url).read())\n+ img_handle = BytesIO(urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n", "issue": "BUG: Geostationary example bug\n```\npython cartopy/docs/source/examples/geostationary.py\nTraceback (most recent call last):\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 60, in <module>\n main()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 54, in main\n img, crs, extent, origin = geos_image()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 43, in geos_image\n img_handle = BytesIO(urllib2.urlopen(url).read())\nNameError: global name 'urllib2' is not defined\n```\n\n", "before_files": [{"content": "\"\"\"\nReprojecting images from a Geostationary projection\n---------------------------------------------------\n\nThis example demonstrates Cartopy's ability to project images into the desired\nprojection on-the-fly. The image itself is retrieved from a URL and is loaded\ndirectly into memory without storing it intermediately into a file. It\nrepresents pre-processed data from Moderate-Resolution Imaging\nSpectroradiometer (MODIS) which has been put into an image in the data's\nnative Geostationary coordinate system - it is then projected by cartopy\ninto a global Miller map.\n\n\"\"\"\n__tags__ = [\"Scalar data\"]\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom io import BytesIO\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n\ndef geos_image():\n \"\"\"\n Return a specific MODIS image by retrieving it from a github gist URL.\n\n Returns\n -------\n img : numpy array\n The pixels of the image in a numpy array.\n img_proj : cartopy CRS\n The rectangular coordinate system of the image.\n img_extent : tuple of floats\n The extent of the image ``(x0, y0, x1, y1)`` referenced in\n the ``img_proj`` coordinate system.\n origin : str\n The origin of the image to be passed through to matplotlib's imshow.\n\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n img_handle = BytesIO(urllib2.urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n return img, img_proj, img_extent, 'upper'\n\n\ndef main():\n ax = plt.axes(projection=ccrs.Miller())\n ax.coastlines()\n ax.set_global()\n img, crs, extent, origin = geos_image()\n plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/cartopy/examples/geostationary.py"}], "after_files": [{"content": "\"\"\"\nReprojecting images from a Geostationary projection\n---------------------------------------------------\n\nThis example demonstrates Cartopy's ability to project images into the desired\nprojection on-the-fly. The image itself is retrieved from a URL and is loaded\ndirectly into memory without storing it intermediately into a file. 
It\nrepresents pre-processed data from Moderate-Resolution Imaging\nSpectroradiometer (MODIS) which has been put into an image in the data's\nnative Geostationary coordinate system - it is then projected by cartopy\ninto a global Miller map.\n\n\"\"\"\n__tags__ = [\"Scalar data\"]\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom io import BytesIO\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n\ndef geos_image():\n \"\"\"\n Return a specific MODIS image by retrieving it from a github gist URL.\n\n Returns\n -------\n img : numpy array\n The pixels of the image in a numpy array.\n img_proj : cartopy CRS\n The rectangular coordinate system of the image.\n img_extent : tuple of floats\n The extent of the image ``(x0, y0, x1, y1)`` referenced in\n the ``img_proj`` coordinate system.\n origin : str\n The origin of the image to be passed through to matplotlib's imshow.\n\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n img_handle = BytesIO(urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n return img, img_proj, img_extent, 'upper'\n\n\ndef main():\n ax = plt.axes(projection=ccrs.Miller())\n ax.coastlines()\n ax.set_global()\n img, crs, extent, origin = geos_image()\n plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/cartopy/examples/geostationary.py"}]} | 1,075 | 207 |
gh_patches_debug_774 | rasdani/github-patches | git_diff | getredash__redash-2501 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Non blocking widget refresh indicator
When refreshing a dashboard widget, the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long periods of time.
To solve this we can keep showing the old data until the new one is available, while showing some indication that refresh is in progress.
Is the following animation enough?

After refreshing a dashboard, widgets become draggable even when not in edit mode
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/widgets.py`
Content:
```
1 import json
2
3 from flask import request
4 from redash import models
5 from redash.handlers.base import BaseResource
6 from redash.permissions import (require_access,
7 require_object_modify_permission,
8 require_permission, view_only)
9
10
11 class WidgetListResource(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 """
15 Add a widget to a dashboard.
16
17 :<json number dashboard_id: The ID for the dashboard being added to
18 :<json visualization_id: The ID of the visualization to put in this widget
19 :<json object options: Widget options
20 :<json string text: Text box contents
21 :<json number width: Width for widget display
22
23 :>json object widget: The created widget
24 """
25 widget_properties = request.get_json(force=True)
26 dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
27 require_object_modify_permission(dashboard, self.current_user)
28
29 widget_properties['options'] = json.dumps(widget_properties['options'])
30 widget_properties.pop('id', None)
31 widget_properties['dashboard'] = dashboard
32
33 visualization_id = widget_properties.pop('visualization_id')
34 if visualization_id:
35 visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
36 require_access(visualization.query_rel.groups, self.current_user, view_only)
37 else:
38 visualization = None
39
40 widget_properties['visualization'] = visualization
41
42 widget = models.Widget(**widget_properties)
43 models.db.session.add(widget)
44 models.db.session.commit()
45
46 models.db.session.commit()
47 return {'widget': widget.to_dict()}
48
49
50 class WidgetResource(BaseResource):
51 @require_permission('edit_dashboard')
52 def post(self, widget_id):
53 """
54 Updates a widget in a dashboard.
55 This method currently handles Text Box widgets only.
56
57 :param number widget_id: The ID of the widget to modify
58
59 :<json string text: The new contents of the text box
60 """
61 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
62 require_object_modify_permission(widget.dashboard, self.current_user)
63 widget_properties = request.get_json(force=True)
64 widget.text = widget_properties['text']
65 widget.options = json.dumps(widget_properties['options'])
66 models.db.session.commit()
67 return widget.to_dict()
68
69 @require_permission('edit_dashboard')
70 def delete(self, widget_id):
71 """
72 Remove a widget from a dashboard.
73
74 :param number widget_id: ID of widget to remove
75 """
76 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
77 require_object_modify_permission(widget.dashboard, self.current_user)
78 models.db.session.delete(widget)
79 models.db.session.commit()
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -44,7 +44,7 @@
models.db.session.commit()
models.db.session.commit()
- return {'widget': widget.to_dict()}
+ return widget.to_dict()
class WidgetResource(BaseResource):
| {"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -44,7 +44,7 @@\n models.db.session.commit()\n \n models.db.session.commit()\n- return {'widget': widget.to_dict()}\n+ return widget.to_dict()\n \n \n class WidgetResource(BaseResource):\n", "issue": "Non blocking widget refresh indicator\nWhen refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times.\r\n\r\nTo solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress.\r\n\r\nIs the following animation enough?\r\n\r\n\nAfter refreshing a dashboard, widgets become draggable even when not in edit mode\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return {'widget': widget.to_dict()}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n 
models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return widget.to_dict()\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py"}]} | 1,104 | 89 |
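The only behavioural change in the widgets patch above is the envelope of the `POST` response: the created widget is returned directly instead of nested under a `widget` key. A hypothetical client-side sketch of the difference, assuming a reachable Redash instance and the `requests` library (the URL, IDs and API key below are placeholders, not part of the original):

```python
import requests

# Placeholder endpoint and payload; adjust for a real Redash deployment.
resp = requests.post(
    "https://redash.example.com/api/widgets",
    json={"dashboard_id": 1, "visualization_id": 2, "options": {}, "text": "", "width": 1},
    headers={"Authorization": "Key YOUR_API_KEY"},
)

widget = resp.json()              # after the patch: the widget object itself
# widget = resp.json()["widget"]  # before the patch: wrapped under a "widget" key
print(widget["id"], widget["options"])
```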
gh_patches_debug_34407 | rasdani/github-patches | git_diff | svthalia__concrexit-2726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Liked photos API endpoint
To add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.
I think it would be best to have `api/v2/photos/photos/` with a `liked` boolean GET filter. It will need to do some filtering to prevent photos that are not published in an album from being returned.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/api/v2/urls.py`
Content:
```
1 """Photos app API v2 urls."""
2 from django.urls import include, path
3
4 from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
5
6 app_name = "photos"
7
8 urlpatterns = [
9 path(
10 "photos/",
11 include(
12 [
13 path("albums/", AlbumListView.as_view(), name="album-list"),
14 path(
15 "albums/<slug:slug>/",
16 AlbumDetailView.as_view(),
17 name="album-detail",
18 ),
19 path(
20 "photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
21 ),
22 ]
23 ),
24 ),
25 ]
26
```
Path: `website/photos/api/v2/views.py`
Content:
```
1 from django.db.models import Count, Prefetch, Q
2
3 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
4 from rest_framework import filters, status
5 from rest_framework.exceptions import PermissionDenied
6 from rest_framework.generics import ListAPIView, RetrieveAPIView
7 from rest_framework.response import Response
8 from rest_framework.views import APIView
9
10 from photos import services
11 from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
12 from photos.models import Album, Like, Photo
13
14
15 class AlbumListView(ListAPIView):
16 """Returns an overview of all albums."""
17
18 serializer_class = AlbumListSerializer
19 queryset = Album.objects.filter(hidden=False)
20 permission_classes = [
21 IsAuthenticatedOrTokenHasScope,
22 ]
23 required_scopes = ["photos:read"]
24 filter_backends = (filters.SearchFilter,)
25 search_fields = ("title", "date", "slug")
26
27
28 class AlbumDetailView(RetrieveAPIView):
29 """Returns the details of an album."""
30
31 serializer_class = AlbumSerializer
32 permission_classes = [
33 IsAuthenticatedOrTokenHasScope,
34 ]
35 required_scopes = ["photos:read"]
36 lookup_field = "slug"
37
38 def retrieve(self, request, *args, **kwargs):
39 if not services.is_album_accessible(request, self.get_object()):
40 raise PermissionDenied
41 return super().retrieve(request, *args, **kwargs)
42
43 def get_queryset(self):
44 photos = Photo.objects.select_properties("num_likes")
45 if self.request.member:
46 photos = photos.annotate(
47 member_likes=Count("likes", filter=Q(likes__member=self.request.member))
48 )
49 return Album.objects.filter(hidden=False).prefetch_related(
50 Prefetch("photo_set", queryset=photos)
51 )
52
53
54 class PhotoLikeView(APIView):
55 permission_classes = [IsAuthenticatedOrTokenHasScope]
56 required_scopes = ["photos:read"]
57
58 def get(self, request, **kwargs):
59 photo_id = kwargs.get("pk")
60 try:
61 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
62 except Photo.DoesNotExist:
63 return Response(status=status.HTTP_404_NOT_FOUND)
64
65 return Response(
66 {
67 "liked": photo.likes.filter(member=request.member).exists(),
68 "num_likes": photo.num_likes,
69 },
70 status=status.HTTP_200_OK,
71 )
72
73 def post(self, request, **kwargs):
74 photo_id = kwargs.get("pk")
75 try:
76 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
77 except Photo.DoesNotExist:
78 return Response(status=status.HTTP_404_NOT_FOUND)
79
80 _, created = Like.objects.get_or_create(photo=photo, member=request.member)
81
82 if created:
83 return Response(
84 {
85 "liked": photo.likes.filter(member=request.member).exists(),
86 "num_likes": photo.num_likes,
87 },
88 status=status.HTTP_201_CREATED,
89 )
90 return Response(
91 {
92 "liked": photo.likes.filter(member=request.member).exists(),
93 "num_likes": photo.num_likes,
94 },
95 status=status.HTTP_200_OK,
96 )
97
98 def delete(self, request, **kwargs):
99 photo_id = kwargs.get("pk")
100 try:
101 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
102 except Photo.DoesNotExist:
103 return Response(status=status.HTTP_404_NOT_FOUND)
104
105 try:
106 like = Like.objects.filter(photo__album__hidden=False).get(
107 member=request.member, photo__pk=photo_id
108 )
109 except Like.DoesNotExist:
110 return Response(
111 {
112 "liked": False,
113 "num_likes": photo.num_likes,
114 },
115 status=status.HTTP_204_NO_CONTENT,
116 )
117
118 like.delete()
119
120 return Response(
121 {
122 "liked": False,
123 "num_likes": photo.num_likes,
124 },
125 status=status.HTTP_202_ACCEPTED,
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py
--- a/website/photos/api/v2/urls.py
+++ b/website/photos/api/v2/urls.py
@@ -1,7 +1,12 @@
"""Photos app API v2 urls."""
from django.urls import include, path
-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
+from photos.api.v2.views import (
+ AlbumDetailView,
+ AlbumListView,
+ LikedPhotosListView,
+ PhotoLikeView,
+)
app_name = "photos"
@@ -19,6 +24,9 @@
path(
"photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
),
+ path(
+ "photos/liked/", LikedPhotosListView.as_view(), name="liked-photos"
+ ),
]
),
),
diff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py
--- a/website/photos/api/v2/views.py
+++ b/website/photos/api/v2/views.py
@@ -8,7 +8,11 @@
from rest_framework.views import APIView
from photos import services
-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
+from photos.api.v2.serializers.album import (
+ AlbumListSerializer,
+ AlbumSerializer,
+ PhotoListSerializer,
+)
from photos.models import Album, Like, Photo
@@ -51,6 +55,35 @@
)
+class LikedPhotosListView(ListAPIView):
+ """Returns the details the liked album."""
+
+ serializer_class = PhotoListSerializer
+ permission_classes = [
+ IsAuthenticatedOrTokenHasScope,
+ ]
+ required_scopes = ["photos:read"]
+
+ def get(self, request, *args, **kwargs):
+ if not self.request.member:
+ return Response(
+ data={
+ "detail": "You need to be a member in order to view your liked photos."
+ },
+ status=status.HTTP_403_FORBIDDEN,
+ )
+ return self.list(request, *args, **kwargs)
+
+ def get_queryset(self):
+ return (
+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)
+ .annotate(
+ member_likes=Count("likes", filter=Q(likes__member=self.request.member))
+ )
+ .select_properties("num_likes")
+ )
+
+
class PhotoLikeView(APIView):
permission_classes = [IsAuthenticatedOrTokenHasScope]
required_scopes = ["photos:read"]
| {"golden_diff": "diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py\n--- a/website/photos/api/v2/urls.py\n+++ b/website/photos/api/v2/urls.py\n@@ -1,7 +1,12 @@\n \"\"\"Photos app API v2 urls.\"\"\"\n from django.urls import include, path\n \n-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n+from photos.api.v2.views import (\n+ AlbumDetailView,\n+ AlbumListView,\n+ LikedPhotosListView,\n+ PhotoLikeView,\n+)\n \n app_name = \"photos\"\n \n@@ -19,6 +24,9 @@\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n+ path(\n+ \"photos/liked/\", LikedPhotosListView.as_view(), name=\"liked-photos\"\n+ ),\n ]\n ),\n ),\ndiff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py\n--- a/website/photos/api/v2/views.py\n+++ b/website/photos/api/v2/views.py\n@@ -8,7 +8,11 @@\n from rest_framework.views import APIView\n \n from photos import services\n-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\n+from photos.api.v2.serializers.album import (\n+ AlbumListSerializer,\n+ AlbumSerializer,\n+ PhotoListSerializer,\n+)\n from photos.models import Album, Like, Photo\n \n \n@@ -51,6 +55,35 @@\n )\n \n \n+class LikedPhotosListView(ListAPIView):\n+ \"\"\"Returns the details the liked album.\"\"\"\n+\n+ serializer_class = PhotoListSerializer\n+ permission_classes = [\n+ IsAuthenticatedOrTokenHasScope,\n+ ]\n+ required_scopes = [\"photos:read\"]\n+\n+ def get(self, request, *args, **kwargs):\n+ if not self.request.member:\n+ return Response(\n+ data={\n+ \"detail\": \"You need to be a member in order to view your liked photos.\"\n+ },\n+ status=status.HTTP_403_FORBIDDEN,\n+ )\n+ return self.list(request, *args, **kwargs)\n+\n+ def get_queryset(self):\n+ return (\n+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)\n+ .annotate(\n+ member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n+ )\n+ .select_properties(\"num_likes\")\n+ )\n+\n+\n class PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n", "issue": "Liked photos API endpoint\nTo add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.\r\n\r\nI think it would be best to have `api/v2/photos/photos/` with `liked` boolean GET filter. 
It will need to do some filtering to prevent photos that are not published in an album from being returned.\n", "before_files": [{"content": "\"\"\"Photos app API v2 urls.\"\"\"\nfrom django.urls import include, path\n\nfrom photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n\napp_name = \"photos\"\n\nurlpatterns = [\n path(\n \"photos/\",\n include(\n [\n path(\"albums/\", AlbumListView.as_view(), name=\"album-list\"),\n path(\n \"albums/<slug:slug>/\",\n AlbumDetailView.as_view(),\n name=\"album-detail\",\n ),\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n ]\n ),\n ),\n]\n", "path": "website/photos/api/v2/urls.py"}, {"content": "from django.db.models import Count, Prefetch, Q\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom photos import services\nfrom photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\nfrom photos.models import Album, Like, Photo\n\n\nclass AlbumListView(ListAPIView):\n \"\"\"Returns an overview of all albums.\"\"\"\n\n serializer_class = AlbumListSerializer\n queryset = Album.objects.filter(hidden=False)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title\", \"date\", \"slug\")\n\n\nclass AlbumDetailView(RetrieveAPIView):\n \"\"\"Returns the details of an album.\"\"\"\n\n serializer_class = AlbumSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n lookup_field = \"slug\"\n\n def retrieve(self, request, *args, **kwargs):\n if not services.is_album_accessible(request, self.get_object()):\n raise PermissionDenied\n return super().retrieve(request, *args, **kwargs)\n\n def get_queryset(self):\n photos = Photo.objects.select_properties(\"num_likes\")\n if self.request.member:\n photos = photos.annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n return Album.objects.filter(hidden=False).prefetch_related(\n Prefetch(\"photo_set\", queryset=photos)\n )\n\n\nclass PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def post(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n _, created = Like.objects.get_or_create(photo=photo, member=request.member)\n\n if created:\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_201_CREATED,\n )\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n 
status=status.HTTP_200_OK,\n )\n\n def delete(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n like = Like.objects.filter(photo__album__hidden=False).get(\n member=request.member, photo__pk=photo_id\n )\n except Like.DoesNotExist:\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_204_NO_CONTENT,\n )\n\n like.delete()\n\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_202_ACCEPTED,\n )\n", "path": "website/photos/api/v2/views.py"}], "after_files": [{"content": "\"\"\"Photos app API v2 urls.\"\"\"\nfrom django.urls import include, path\n\nfrom photos.api.v2.views import (\n AlbumDetailView,\n AlbumListView,\n LikedPhotosListView,\n PhotoLikeView,\n)\n\napp_name = \"photos\"\n\nurlpatterns = [\n path(\n \"photos/\",\n include(\n [\n path(\"albums/\", AlbumListView.as_view(), name=\"album-list\"),\n path(\n \"albums/<slug:slug>/\",\n AlbumDetailView.as_view(),\n name=\"album-detail\",\n ),\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n path(\n \"photos/liked/\", LikedPhotosListView.as_view(), name=\"liked-photos\"\n ),\n ]\n ),\n ),\n]\n", "path": "website/photos/api/v2/urls.py"}, {"content": "from django.db.models import Count, Prefetch, Q\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom photos import services\nfrom photos.api.v2.serializers.album import (\n AlbumListSerializer,\n AlbumSerializer,\n PhotoListSerializer,\n)\nfrom photos.models import Album, Like, Photo\n\n\nclass AlbumListView(ListAPIView):\n \"\"\"Returns an overview of all albums.\"\"\"\n\n serializer_class = AlbumListSerializer\n queryset = Album.objects.filter(hidden=False)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title\", \"date\", \"slug\")\n\n\nclass AlbumDetailView(RetrieveAPIView):\n \"\"\"Returns the details of an album.\"\"\"\n\n serializer_class = AlbumSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n lookup_field = \"slug\"\n\n def retrieve(self, request, *args, **kwargs):\n if not services.is_album_accessible(request, self.get_object()):\n raise PermissionDenied\n return super().retrieve(request, *args, **kwargs)\n\n def get_queryset(self):\n photos = Photo.objects.select_properties(\"num_likes\")\n if self.request.member:\n photos = photos.annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n return Album.objects.filter(hidden=False).prefetch_related(\n Prefetch(\"photo_set\", queryset=photos)\n )\n\n\nclass LikedPhotosListView(ListAPIView):\n \"\"\"Returns the details the liked album.\"\"\"\n\n serializer_class = PhotoListSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, *args, **kwargs):\n if not self.request.member:\n return Response(\n data={\n \"detail\": \"You need to be a member in 
order to view your liked photos.\"\n },\n status=status.HTTP_403_FORBIDDEN,\n )\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n return (\n Photo.objects.filter(likes__member=self.request.member, album__hidden=False)\n .annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n .select_properties(\"num_likes\")\n )\n\n\nclass PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def post(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n _, created = Like.objects.get_or_create(photo=photo, member=request.member)\n\n if created:\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_201_CREATED,\n )\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def delete(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n like = Like.objects.filter(photo__album__hidden=False).get(\n member=request.member, photo__pk=photo_id\n )\n except Like.DoesNotExist:\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_204_NO_CONTENT,\n )\n\n like.delete()\n\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_202_ACCEPTED,\n )\n", "path": "website/photos/api/v2/views.py"}]} | 1,634 | 588 |
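The heart of the photos patch above is the queryset behind the new `photos/liked/` route: only photos the requesting member has liked, restricted to albums that are not hidden, annotated with the member's own like count. A stripped-down sketch of that filter, assuming the project's `Photo` model and the Django ORM (the `select_properties("num_likes")` call from django-queryable-properties is left out here):

```python
from django.db.models import Count, Q

from photos.models import Photo  # concrexit's Photo model


def liked_photos_for(member):
    # Liked by this member, and never from a hidden album.
    return (
        Photo.objects.filter(likes__member=member, album__hidden=False)
        .annotate(member_likes=Count("likes", filter=Q(likes__member=member)))
    )
```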
gh_patches_debug_38309 | rasdani/github-patches | git_diff | tornadoweb__tornado-2562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update release notes and set version to 5.0b1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Ensure we get the local copy of tornado instead of what's on the standard path
2 import os
3 import sys
4 import time
5 sys.path.insert(0, os.path.abspath(".."))
6 import tornado
7
8 master_doc = "index"
9
10 project = "Tornado"
11 copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
12
13 version = release = tornado.version
14
15 extensions = [
16 "sphinx.ext.autodoc",
17 "sphinx.ext.coverage",
18 "sphinx.ext.doctest",
19 "sphinx.ext.intersphinx",
20 "sphinx.ext.viewcode",
21 ]
22
23 primary_domain = 'py'
24 default_role = 'py:obj'
25
26 autodoc_member_order = "bysource"
27 autoclass_content = "both"
28 autodoc_inherit_docstrings = False
29
30 # Without this line sphinx includes a copy of object.__init__'s docstring
31 # on any class that doesn't define __init__.
32 # https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
33 autodoc_docstring_signature = False
34
35 coverage_skip_undoc_in_source = True
36 coverage_ignore_modules = [
37 "tornado.platform.asyncio",
38 "tornado.platform.caresresolver",
39 "tornado.platform.twisted",
40 ]
41 # I wish this could go in a per-module file...
42 coverage_ignore_classes = [
43 # tornado.gen
44 "Runner",
45
46 # tornado.web
47 "ChunkedTransferEncoding",
48 "GZipContentEncoding",
49 "OutputTransform",
50 "TemplateModule",
51 "url",
52
53 # tornado.websocket
54 "WebSocketProtocol",
55 "WebSocketProtocol13",
56 "WebSocketProtocol76",
57 ]
58
59 coverage_ignore_functions = [
60 # various modules
61 "doctests",
62 "main",
63
64 # tornado.escape
65 # parse_qs_bytes should probably be documented but it's complicated by
66 # having different implementations between py2 and py3.
67 "parse_qs_bytes",
68
69 # tornado.gen
70 "Multi",
71 ]
72
73 html_favicon = 'favicon.ico'
74
75 latex_documents = [
76 ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
77 ]
78
79 intersphinx_mapping = {
80 'python': ('https://docs.python.org/3.6/', None),
81 }
82
83 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
84
85 # On RTD we can't import sphinx_rtd_theme, but it will be applied by
86 # default anyway. This block will use the same theme when building locally
87 # as on RTD.
88 if not on_rtd:
89 import sphinx_rtd_theme
90 html_theme = 'sphinx_rtd_theme'
91 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,14 +1,14 @@
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
-import time
+
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
-copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
+copyright = "The Tornado Authors"
version = release = tornado.version
@@ -20,8 +20,8 @@
"sphinx.ext.viewcode",
]
-primary_domain = 'py'
-default_role = 'py:obj'
+primary_domain = "py"
+default_role = "py:obj"
autodoc_member_order = "bysource"
autoclass_content = "both"
@@ -42,14 +42,12 @@
coverage_ignore_classes = [
# tornado.gen
"Runner",
-
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
-
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
@@ -60,32 +58,36 @@
# various modules
"doctests",
"main",
-
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
-
# tornado.gen
"Multi",
]
-html_favicon = 'favicon.ico'
+html_favicon = "favicon.ico"
latex_documents = [
- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
+ (
+ "index",
+ "tornado.tex",
+ "Tornado Documentation",
+ "The Tornado Authors",
+ "manual",
+ False,
+ )
]
-intersphinx_mapping = {
- 'python': ('https://docs.python.org/3.6/', None),
-}
+intersphinx_mapping = {"python": ("https://docs.python.org/3.6/", None)}
-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
+
+ html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,14 +1,14 @@\n # Ensure we get the local copy of tornado instead of what's on the standard path\n import os\n import sys\n-import time\n+\n sys.path.insert(0, os.path.abspath(\"..\"))\n import tornado\n \n master_doc = \"index\"\n \n project = \"Tornado\"\n-copyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n+copyright = \"The Tornado Authors\"\n \n version = release = tornado.version\n \n@@ -20,8 +20,8 @@\n \"sphinx.ext.viewcode\",\n ]\n \n-primary_domain = 'py'\n-default_role = 'py:obj'\n+primary_domain = \"py\"\n+default_role = \"py:obj\"\n \n autodoc_member_order = \"bysource\"\n autoclass_content = \"both\"\n@@ -42,14 +42,12 @@\n coverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n-\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n-\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n@@ -60,32 +58,36 @@\n # various modules\n \"doctests\",\n \"main\",\n-\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n-\n # tornado.gen\n \"Multi\",\n ]\n \n-html_favicon = 'favicon.ico'\n+html_favicon = \"favicon.ico\"\n \n latex_documents = [\n- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n+ (\n+ \"index\",\n+ \"tornado.tex\",\n+ \"Tornado Documentation\",\n+ \"The Tornado Authors\",\n+ \"manual\",\n+ False,\n+ )\n ]\n \n-intersphinx_mapping = {\n- 'python': ('https://docs.python.org/3.6/', None),\n-}\n+intersphinx_mapping = {\"python\": (\"https://docs.python.org/3.6/\", None)}\n \n-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n+on_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n \n # On RTD we can't import sphinx_rtd_theme, but it will be applied by\n # default anyway. 
This block will use the same theme when building locally\n # as on RTD.\n if not on_rtd:\n import sphinx_rtd_theme\n- html_theme = 'sphinx_rtd_theme'\n+\n+ html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "issue": "Update release notes and set version to 5.0b1\n\n", "before_files": [{"content": "# Ensure we get the local copy of tornado instead of what's on the standard path\nimport os\nimport sys\nimport time\nsys.path.insert(0, os.path.abspath(\"..\"))\nimport tornado\n\nmaster_doc = \"index\"\n\nproject = \"Tornado\"\ncopyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n\nversion = release = tornado.version\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\nprimary_domain = 'py'\ndefault_role = 'py:obj'\n\nautodoc_member_order = \"bysource\"\nautoclass_content = \"both\"\nautodoc_inherit_docstrings = False\n\n# Without this line sphinx includes a copy of object.__init__'s docstring\n# on any class that doesn't define __init__.\n# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__\nautodoc_docstring_signature = False\n\ncoverage_skip_undoc_in_source = True\ncoverage_ignore_modules = [\n \"tornado.platform.asyncio\",\n \"tornado.platform.caresresolver\",\n \"tornado.platform.twisted\",\n]\n# I wish this could go in a per-module file...\ncoverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n \"WebSocketProtocol76\",\n]\n\ncoverage_ignore_functions = [\n # various modules\n \"doctests\",\n \"main\",\n\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n\n # tornado.gen\n \"Multi\",\n]\n\nhtml_favicon = 'favicon.ico'\n\nlatex_documents = [\n ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n]\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n}\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# On RTD we can't import sphinx_rtd_theme, but it will be applied by\n# default anyway. 
This block will use the same theme when building locally\n# as on RTD.\nif not on_rtd:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Ensure we get the local copy of tornado instead of what's on the standard path\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"..\"))\nimport tornado\n\nmaster_doc = \"index\"\n\nproject = \"Tornado\"\ncopyright = \"The Tornado Authors\"\n\nversion = release = tornado.version\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\nprimary_domain = \"py\"\ndefault_role = \"py:obj\"\n\nautodoc_member_order = \"bysource\"\nautoclass_content = \"both\"\nautodoc_inherit_docstrings = False\n\n# Without this line sphinx includes a copy of object.__init__'s docstring\n# on any class that doesn't define __init__.\n# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__\nautodoc_docstring_signature = False\n\ncoverage_skip_undoc_in_source = True\ncoverage_ignore_modules = [\n \"tornado.platform.asyncio\",\n \"tornado.platform.caresresolver\",\n \"tornado.platform.twisted\",\n]\n# I wish this could go in a per-module file...\ncoverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n \"WebSocketProtocol76\",\n]\n\ncoverage_ignore_functions = [\n # various modules\n \"doctests\",\n \"main\",\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n # tornado.gen\n \"Multi\",\n]\n\nhtml_favicon = \"favicon.ico\"\n\nlatex_documents = [\n (\n \"index\",\n \"tornado.tex\",\n \"Tornado Documentation\",\n \"The Tornado Authors\",\n \"manual\",\n False,\n )\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3.6/\", None)}\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# On RTD we can't import sphinx_rtd_theme, but it will be applied by\n# default anyway. This block will use the same theme when building locally\n# as on RTD.\nif not on_rtd:\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} | 1,063 | 641 |
gh_patches_debug_15664 | rasdani/github-patches | git_diff | getredash__redash-909 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error on adding modules to python datasource
I'm trying to add a module to a python datasource, but it's failing with this traceback
```
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login.py", line 792, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py", line 19, in dispatch_request
return super(BaseResource, self).dispatch_request(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/permissions.py", line 40, in decorated
return fn(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py", line 38, in post
data_source.options.update(req['options'])
File "/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py", line 56, in update
if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
KeyError: 'secret'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/utils/configuration.py`
Content:
```
1 import json
2 import jsonschema
3 from jsonschema import ValidationError
4
5 SECRET_PLACEHOLDER = '--------'
6
7
8 class ConfigurationContainer(object):
9 def __init__(self, config, schema=None):
10 self._config = config
11 self.set_schema(schema)
12
13 def set_schema(self, schema):
14 self._schema = schema
15
16 @property
17 def schema(self):
18 if self._schema is None:
19 raise RuntimeError("Schema missing.")
20
21 return self._schema
22
23 def is_valid(self):
24 try:
25 self.validate()
26 except (ValidationError, ValueError):
27 return False
28
29 return True
30
31 def validate(self):
32 jsonschema.validate(self._config, self._schema)
33
34 def to_json(self):
35 return json.dumps(self._config)
36
37 def iteritems(self):
38 return self._config.iteritems()
39
40 def to_dict(self, mask_secrets=False):
41 if (mask_secrets is False or 'secret' not in self.schema):
42 return self._config
43
44 config = self._config.copy()
45 for key in config:
46 if key in self.schema['secret']:
47 config[key] = SECRET_PLACEHOLDER
48
49 return config
50
51 def update(self, new_config):
52 jsonschema.validate(new_config, self.schema)
53
54 config = {}
55 for k, v in new_config.iteritems():
56 if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
57 config[k] = self[k]
58 else:
59 config[k] = v
60
61 self._config = config
62
63 def get(self, *args, **kwargs):
64 return self._config.get(*args, **kwargs)
65
66 def __getitem__(self, item):
67 if item in self._config:
68 return self._config[item]
69
70 raise KeyError(item)
71
72 def __contains__(self, item):
73 return item in self._config
74
75 @classmethod
76 def from_json(cls, config_in_json):
77 return cls(json.loads(config_in_json))
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py
--- a/redash/utils/configuration.py
+++ b/redash/utils/configuration.py
@@ -38,7 +38,7 @@
return self._config.iteritems()
def to_dict(self, mask_secrets=False):
- if (mask_secrets is False or 'secret' not in self.schema):
+ if mask_secrets is False or 'secret' not in self.schema:
return self._config
config = self._config.copy()
@@ -53,7 +53,7 @@
config = {}
for k, v in new_config.iteritems():
- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:
config[k] = self[k]
else:
config[k] = v
| {"golden_diff": "diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py\n--- a/redash/utils/configuration.py\n+++ b/redash/utils/configuration.py\n@@ -38,7 +38,7 @@\n return self._config.iteritems()\n \n def to_dict(self, mask_secrets=False):\n- if (mask_secrets is False or 'secret' not in self.schema):\n+ if mask_secrets is False or 'secret' not in self.schema:\n return self._config\n \n config = self._config.copy()\n@@ -53,7 +53,7 @@\n \n config = {}\n for k, v in new_config.iteritems():\n- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n else:\n config[k] = v\n", "issue": "Error on adding modules to python datasource\nI'm trying to add a module to a python datasource, but it's failing with this traceback\n\n```\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1475, in full_dispatch_request\n rv = self.dispatch_request()\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1461, in dispatch_request\n return self.view_functions[rule.endpoint](**req.view_args)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 477, in wrapper\n resp = resource(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_login.py\", line 792, in decorated_view\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask/views.py\", line 84, in view\n return self.dispatch_request(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py\", line 19, in dispatch_request\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 587, in dispatch_request\n resp = meth(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/permissions.py\", line 40, in decorated\n return fn(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py\", line 38, in post\n data_source.options.update(req['options'])\n File \"/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py\", line 56, in update\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\nKeyError: 'secret'\n```\n\n", "before_files": [{"content": "import json\nimport jsonschema\nfrom jsonschema import ValidationError\n\nSECRET_PLACEHOLDER = '--------'\n\n\nclass ConfigurationContainer(object):\n def __init__(self, config, schema=None):\n self._config = config\n self.set_schema(schema)\n\n def set_schema(self, schema):\n self._schema = schema\n\n @property\n def schema(self):\n if self._schema is None:\n raise RuntimeError(\"Schema missing.\")\n\n return self._schema\n\n def is_valid(self):\n try:\n self.validate()\n except (ValidationError, ValueError):\n return False\n\n return True\n\n def validate(self):\n jsonschema.validate(self._config, self._schema)\n\n def to_json(self):\n return json.dumps(self._config)\n\n def iteritems(self):\n return self._config.iteritems()\n\n def to_dict(self, mask_secrets=False):\n if (mask_secrets is False or 'secret' not in self.schema):\n return self._config\n\n config = self._config.copy()\n for key in config:\n if key in self.schema['secret']:\n config[key] = SECRET_PLACEHOLDER\n\n return config\n\n def update(self, new_config):\n jsonschema.validate(new_config, self.schema)\n\n config = {}\n for k, v in new_config.iteritems():\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n config[k] = 
self[k]\n else:\n config[k] = v\n\n self._config = config\n\n def get(self, *args, **kwargs):\n return self._config.get(*args, **kwargs)\n\n def __getitem__(self, item):\n if item in self._config:\n return self._config[item]\n\n raise KeyError(item)\n\n def __contains__(self, item):\n return item in self._config\n\n @classmethod\n def from_json(cls, config_in_json):\n return cls(json.loads(config_in_json))\n", "path": "redash/utils/configuration.py"}], "after_files": [{"content": "import json\nimport jsonschema\nfrom jsonschema import ValidationError\n\nSECRET_PLACEHOLDER = '--------'\n\n\nclass ConfigurationContainer(object):\n def __init__(self, config, schema=None):\n self._config = config\n self.set_schema(schema)\n\n def set_schema(self, schema):\n self._schema = schema\n\n @property\n def schema(self):\n if self._schema is None:\n raise RuntimeError(\"Schema missing.\")\n\n return self._schema\n\n def is_valid(self):\n try:\n self.validate()\n except (ValidationError, ValueError):\n return False\n\n return True\n\n def validate(self):\n jsonschema.validate(self._config, self._schema)\n\n def to_json(self):\n return json.dumps(self._config)\n\n def iteritems(self):\n return self._config.iteritems()\n\n def to_dict(self, mask_secrets=False):\n if mask_secrets is False or 'secret' not in self.schema:\n return self._config\n\n config = self._config.copy()\n for key in config:\n if key in self.schema['secret']:\n config[key] = SECRET_PLACEHOLDER\n\n return config\n\n def update(self, new_config):\n jsonschema.validate(new_config, self.schema)\n\n config = {}\n for k, v in new_config.iteritems():\n if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n else:\n config[k] = v\n\n self._config = config\n\n def get(self, *args, **kwargs):\n return self._config.get(*args, **kwargs)\n\n def __getitem__(self, item):\n if item in self._config:\n return self._config[item]\n\n raise KeyError(item)\n\n def __contains__(self, item):\n return item in self._config\n\n @classmethod\n def from_json(cls, config_in_json):\n return cls(json.loads(config_in_json))\n", "path": "redash/utils/configuration.py"}]} | 1,295 | 193 |
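For reference, a minimal standalone sketch of the patched `update()` logic from the golden diff above. It is simplified for Python 3 (`items()` instead of `iteritems()`) and keeps only the parts relevant to the fix; the real class in `redash/utils/configuration.py` carries more behaviour.

```python
import jsonschema

SECRET_PLACEHOLDER = '--------'


class ConfigurationContainer(object):
    """Reduced sketch of the container; only update() matters for the fix."""

    def __init__(self, config, schema=None):
        self._config = config
        self.schema = schema or {}

    def update(self, new_config):
        jsonschema.validate(new_config, self.schema)

        config = {}
        for k, v in new_config.items():
            # .get('secret', []) tolerates schemas that define no 'secret' key,
            # which is exactly what raised KeyError for the python datasource.
            if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:
                config[k] = self._config[k]
            else:
                config[k] = v

        self._config = config


container = ConfigurationContainer({'script': 'print(1)'}, schema={'type': 'object'})
container.update({'script': 'print(2)'})  # no KeyError even though 'secret' is absent
```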
gh_patches_debug_9045 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2967 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider att is broken
During the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/att.py`
Content:
```
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 DAY_MAPPING = {
9 "MONDAY": "Mo",
10 "TUESDAY": "Tu",
11 "WEDNESDAY": "We",
12 "THURSDAY": "Th",
13 "FRIDAY": "Fr",
14 "SATURDAY": "Sa",
15 "SUNDAY": "Su"
16 }
17
18
19 class ATTScraper(scrapy.Spider):
20 name = "att"
21 item_attributes = { 'brand': "AT&T", 'brand_wikidata': "Q35476" }
22 allowed_domains = ['www.att.com']
23 start_urls = (
24 'https://www.att.com/stores/us',
25 )
26 download_delay = 0.2
27
28 def parse_hours(self, store_hours):
29 opening_hours = OpeningHours()
30 store_data = json.loads(store_hours)
31
32 for store_day in store_data:
33 if len(store_day["intervals"]) < 1:
34 continue
35 day = DAY_MAPPING[store_day["day"]]
36 open_time = str(store_day["intervals"][0]["start"])
37 if open_time == '0':
38 open_time = '0000'
39 close_time = str(store_day["intervals"][0]["end"])
40 if close_time == '0':
41 close_time = '2359'
42 opening_hours.add_range(day=day,
43 open_time=open_time,
44 close_time=close_time,
45 time_format='%H%M'
46 )
47
48 return opening_hours.as_opening_hours()
49
50 def parse(self, response):
51 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
52 is_store_list = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
53
54 if not urls and is_store_list:
55 urls = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
56 for url in urls:
57 if url.count('/') >= 2:
58 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
59 else:
60 yield scrapy.Request(response.urljoin(url))
61
62 def parse_store(self, response):
63 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
64
65 properties = {
66 'ref': ref,
67 'name': response.xpath('normalize-space(//span[@class="LocationName-brand"]/text())').extract_first(),
68 'addr_full': response.xpath('normalize-space(//meta[@itemprop="streetAddress"]/@content)').extract_first(),
69 'city': response.xpath('normalize-space(//meta[@itemprop="addressLocality"]/@content)').extract_first(),
70 'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
71 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
72 'country': response.xpath('normalize-space(//abbr[@itemprop="addressCountry"]/text())').extract_first(),
73 'phone': response.xpath('normalize-space(//span[@itemprop="telephone"]//text())').extract_first(),
74 'website': response.url,
75 'lat': response.xpath('normalize-space(//meta[@itemprop="latitude"]/@content)').extract_first(),
76 'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
77 }
78
79 hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
80 properties['opening_hours'] = self.parse_hours(hours)
81
82 yield GeojsonPointItem(**properties)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/att.py b/locations/spiders/att.py
--- a/locations/spiders/att.py
+++ b/locations/spiders/att.py
@@ -76,7 +76,7 @@
'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
}
- hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
+ hours = response.xpath('//span[@class="c-hours-today js-hours-today"]/@data-days').extract_first()
properties['opening_hours'] = self.parse_hours(hours)
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/att.py b/locations/spiders/att.py\n--- a/locations/spiders/att.py\n+++ b/locations/spiders/att.py\n@@ -76,7 +76,7 @@\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n \n- hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n+ hours = response.xpath('//span[@class=\"c-hours-today js-hours-today\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider att is broken\nDuring the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass ATTScraper(scrapy.Spider):\n name = \"att\"\n item_attributes = { 'brand': \"AT&T\", 'brand_wikidata': \"Q35476\" }\n allowed_domains = ['www.att.com']\n start_urls = (\n 'https://www.att.com/stores/us',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n store_data = json.loads(store_hours)\n\n for store_day in store_data:\n if len(store_day[\"intervals\"]) < 1:\n continue\n day = DAY_MAPPING[store_day[\"day\"]]\n open_time = str(store_day[\"intervals\"][0][\"start\"])\n if open_time == '0':\n open_time = '0000'\n close_time = str(store_day[\"intervals\"][0][\"end\"])\n if close_time == '0':\n close_time = '2359'\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n is_store_list = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n\n if not urls and is_store_list:\n urls = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n for url in urls:\n if url.count('/') >= 2:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n properties = {\n 'ref': ref,\n 'name': response.xpath('normalize-space(//span[@class=\"LocationName-brand\"]/text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n 'city': response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[@itemprop=\"addressCountry\"]/text())').extract_first(),\n 'phone': 
response.xpath('normalize-space(//span[@itemprop=\"telephone\"]//text())').extract_first(),\n 'website': response.url,\n 'lat': response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n\n hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/att.py"}], "after_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass ATTScraper(scrapy.Spider):\n name = \"att\"\n item_attributes = { 'brand': \"AT&T\", 'brand_wikidata': \"Q35476\" }\n allowed_domains = ['www.att.com']\n start_urls = (\n 'https://www.att.com/stores/us',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n store_data = json.loads(store_hours)\n\n for store_day in store_data:\n if len(store_day[\"intervals\"]) < 1:\n continue\n day = DAY_MAPPING[store_day[\"day\"]]\n open_time = str(store_day[\"intervals\"][0][\"start\"])\n if open_time == '0':\n open_time = '0000'\n close_time = str(store_day[\"intervals\"][0][\"end\"])\n if close_time == '0':\n close_time = '2359'\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n is_store_list = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n\n if not urls and is_store_list:\n urls = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n for url in urls:\n if url.count('/') >= 2:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n properties = {\n 'ref': ref,\n 'name': response.xpath('normalize-space(//span[@class=\"LocationName-brand\"]/text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n 'city': response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[@itemprop=\"addressCountry\"]/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//span[@itemprop=\"telephone\"]//text())').extract_first(),\n 'website': response.url,\n 'lat': response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n\n hours = response.xpath('//span[@class=\"c-hours-today js-hours-today\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/att.py"}]} | 1,391 | 155 |
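One way to sanity-check the corrected selector from the diff above without running a full crawl is to point Scrapy's `Selector` at a small HTML fragment. The markup below is hypothetical (it only mirrors the attribute names the patched XPath expects; it is not copied from att.com):

```python
from scrapy.selector import Selector

# Hypothetical fragment mirroring the hours markup the patched XPath targets.
sample_html = """
<span class="c-hours-today js-hours-today"
      data-days='[{"day": "MONDAY", "intervals": [{"start": 900, "end": 1800}]}]'>
</span>
"""

sel = Selector(text=sample_html)
# Patched selector; the previous 'c-location-hours-today js-location-hours'
# class stopped matching, which is consistent with the 0-feature runs.
hours = sel.xpath('//span[@class="c-hours-today js-hours-today"]/@data-days').extract_first()
print(hours)  # the raw JSON string that parse_hours() would receive
```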
gh_patches_debug_8441 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-953 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HistogramLUTWidget kargs bug?
# lut_widget = HistogramLUTWidget(background='w')
File "/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py", line 18, in **init**
self.item = HistogramLUTItem(_args, *_kargs)
# TypeError: **init**() got an unexpected keyword argument 'background'
I can fix it by:
class HistogramLUTWidget(pg.GraphicsView):
```
def __init__(self, parent=None, *args, **kargs):
# background = kargs.get('background', 'default')
background = kargs.pop('background', 'default')
```
...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/widgets/HistogramLUTWidget.py`
Content:
```
1 """
2 Widget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.
3 This is a wrapper around HistogramLUTItem
4 """
5
6 from ..Qt import QtGui, QtCore
7 from .GraphicsView import GraphicsView
8 from ..graphicsItems.HistogramLUTItem import HistogramLUTItem
9
10 __all__ = ['HistogramLUTWidget']
11
12
13 class HistogramLUTWidget(GraphicsView):
14
15 def __init__(self, parent=None, *args, **kargs):
16 background = kargs.get('background', 'default')
17 GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
18 self.item = HistogramLUTItem(*args, **kargs)
19 self.setCentralItem(self.item)
20 self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
21 self.setMinimumWidth(95)
22
23
24 def sizeHint(self):
25 return QtCore.QSize(115, 200)
26
27
28
29 def __getattr__(self, attr):
30 return getattr(self.item, attr)
31
32
33
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py
--- a/pyqtgraph/widgets/HistogramLUTWidget.py
+++ b/pyqtgraph/widgets/HistogramLUTWidget.py
@@ -13,7 +13,7 @@
class HistogramLUTWidget(GraphicsView):
def __init__(self, parent=None, *args, **kargs):
- background = kargs.get('background', 'default')
+ background = kargs.pop('background', 'default')
GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
self.item = HistogramLUTItem(*args, **kargs)
self.setCentralItem(self.item)
| {"golden_diff": "diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py\n--- a/pyqtgraph/widgets/HistogramLUTWidget.py\n+++ b/pyqtgraph/widgets/HistogramLUTWidget.py\n@@ -13,7 +13,7 @@\n class HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n- background = kargs.get('background', 'default')\n+ background = kargs.pop('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n", "issue": "HistogramLUTWidget kargs bug?\n# lut_widget = HistogramLUTWidget(background='w')\n\n File \"/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py\", line 18, in **init**\n self.item = HistogramLUTItem(_args, *_kargs)\n# TypeError: **init**() got an unexpected keyword argument 'background'\n\nI can fix it by:\n\nclass HistogramLUTWidget(pg.GraphicsView):\n\n```\ndef __init__(self, parent=None, *args, **kargs):\n # background = kargs.get('background', 'default')\n background = kargs.pop('background', 'default')\n```\n\n...\n\n", "before_files": [{"content": "\"\"\"\nWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.\nThis is a wrapper around HistogramLUTItem\n\"\"\"\n\nfrom ..Qt import QtGui, QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems.HistogramLUTItem import HistogramLUTItem\n\n__all__ = ['HistogramLUTWidget']\n\n\nclass HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n background = kargs.get('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\n self.setMinimumWidth(95)\n \n\n def sizeHint(self):\n return QtCore.QSize(115, 200)\n \n \n\n def __getattr__(self, attr):\n return getattr(self.item, attr)\n\n\n\n", "path": "pyqtgraph/widgets/HistogramLUTWidget.py"}], "after_files": [{"content": "\"\"\"\nWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.\nThis is a wrapper around HistogramLUTItem\n\"\"\"\n\nfrom ..Qt import QtGui, QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems.HistogramLUTItem import HistogramLUTItem\n\n__all__ = ['HistogramLUTWidget']\n\n\nclass HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n background = kargs.pop('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\n self.setMinimumWidth(95)\n \n\n def sizeHint(self):\n return QtCore.QSize(115, 200)\n \n \n\n def __getattr__(self, attr):\n return getattr(self.item, attr)\n\n\n\n", "path": "pyqtgraph/widgets/HistogramLUTWidget.py"}]} | 709 | 158 |
gh_patches_debug_16517 | rasdani/github-patches | git_diff | ultrabug__py3status-113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in the keyboard_layout module
`xbklayout` function should be `xkblayout`, i.e. "kb" instead of "bk". This typo appears 3 times in total.
The rest of the code uses "kb" so I assumed what I found was a typo and decided to report it since it already caught my eye.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/keyboard_layout.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Display the current keyboard layout.
4
5 Configuration parameters:
6 - cache_timeout: check for keyboard layout change every seconds
7
8 Requires:
9 - xkblayout-state
10 or
11 - setxkbmap
12
13 @author shadowprince
14 @license Eclipse Public License
15 """
16
17 from subprocess import check_output
18 from time import time
19 import shlex
20 import re
21
22 # colors of layouts, check your command's output to match keys
23 LANG_COLORS = {
24 'fr': '#268BD2', # solarized blue
25 'ru': '#F75252', # red
26 'ua': '#FCE94F', # yellow
27 'us': '#729FCF', # light blue
28 }
29
30 LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
31
32
33 def xbklayout():
34 """
35 check using xkblayout-state (preferred method)
36 """
37 return check_output(
38 ["xkblayout-state", "print", "%s"]
39 ).decode('utf-8')
40
41
42 def setxkbmap():
43 """
44 check using setxkbmap >= 1.3.0
45
46 Please read issue 33 for more information :
47 https://github.com/ultrabug/py3status/pull/33
48 """
49 out = check_output(shlex.split("setxkbmap -query")).decode("utf-8")
50
51 return re.match(LAYOUT_RE, out).group(1)
52
53
54 class Py3status:
55 """
56 """
57 # available configuration parameters
58 cache_timeout = 10
59 color = ''
60
61 def __init__(self):
62 """
63 find the best implementation to get the keyboard's layout
64 """
65 try:
66 xbklayout()
67 except:
68 self.command = setxkbmap
69 else:
70 self.command = xbklayout
71
72 def keyboard_layout(self, i3s_output_list, i3s_config):
73 response = {
74 'cached_until': time() + self.cache_timeout,
75 'full_text': ''
76 }
77
78 lang = self.command().strip()
79 lang_color = self.color if self.color else LANG_COLORS.get(lang)
80
81 response['full_text'] = lang or '??'
82 if lang_color:
83 response['color'] = lang_color
84
85 return response
86
87 if __name__ == "__main__":
88 """
89 Test this module by calling it directly.
90 """
91 from time import sleep
92 x = Py3status()
93 config = {
94 'color_good': '#00FF00',
95 'color_bad': '#FF0000',
96 }
97 while True:
98 print(x.keyboard_layout([], config))
99 sleep(1)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py
--- a/py3status/modules/keyboard_layout.py
+++ b/py3status/modules/keyboard_layout.py
@@ -30,7 +30,7 @@
LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
-def xbklayout():
+def xkblayout():
"""
check using xkblayout-state (preferred method)
"""
@@ -63,11 +63,11 @@
find the best implementation to get the keyboard's layout
"""
try:
- xbklayout()
+ xkblayout()
except:
self.command = setxkbmap
else:
- self.command = xbklayout
+ self.command = xkblayout
def keyboard_layout(self, i3s_output_list, i3s_config):
response = {
| {"golden_diff": "diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py\n--- a/py3status/modules/keyboard_layout.py\n+++ b/py3status/modules/keyboard_layout.py\n@@ -30,7 +30,7 @@\n LAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n \n \n-def xbklayout():\n+def xkblayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n@@ -63,11 +63,11 @@\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n- xbklayout()\n+ xkblayout()\n except:\n self.command = setxkbmap\n else:\n- self.command = xbklayout\n+ self.command = xkblayout\n \n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n", "issue": "Typo in the keyboard_layout module\n`xbklayout` function should be `xkblayout`, i.e. \"kb\" instead of \"bk\". This typo appears 3 times in total.\n\nThe rest of the code uses \"kb\" so I assumed what I found was a typo and decided to report it since it already caught my eye.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay the current keyboard layout.\n\nConfiguration parameters:\n - cache_timeout: check for keyboard layout change every seconds\n\nRequires:\n - xkblayout-state\n or\n - setxkbmap\n\n@author shadowprince\n@license Eclipse Public License\n\"\"\"\n\nfrom subprocess import check_output\nfrom time import time\nimport shlex\nimport re\n\n# colors of layouts, check your command's output to match keys\nLANG_COLORS = {\n 'fr': '#268BD2', # solarized blue\n 'ru': '#F75252', # red\n 'ua': '#FCE94F', # yellow\n 'us': '#729FCF', # light blue\n}\n\nLAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n\n\ndef xbklayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n return check_output(\n [\"xkblayout-state\", \"print\", \"%s\"]\n ).decode('utf-8')\n\n\ndef setxkbmap():\n \"\"\"\n check using setxkbmap >= 1.3.0\n\n Please read issue 33 for more information :\n https://github.com/ultrabug/py3status/pull/33\n \"\"\"\n out = check_output(shlex.split(\"setxkbmap -query\")).decode(\"utf-8\")\n\n return re.match(LAYOUT_RE, out).group(1)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n color = ''\n\n def __init__(self):\n \"\"\"\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n xbklayout()\n except:\n self.command = setxkbmap\n else:\n self.command = xbklayout\n\n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': ''\n }\n\n lang = self.command().strip()\n lang_color = self.color if self.color else LANG_COLORS.get(lang)\n\n response['full_text'] = lang or '??'\n if lang_color:\n response['color'] = lang_color\n\n return response\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n from time import sleep\n x = Py3status()\n config = {\n 'color_good': '#00FF00',\n 'color_bad': '#FF0000',\n }\n while True:\n print(x.keyboard_layout([], config))\n sleep(1)\n", "path": "py3status/modules/keyboard_layout.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay the current keyboard layout.\n\nConfiguration parameters:\n - cache_timeout: check for keyboard layout change every seconds\n\nRequires:\n - xkblayout-state\n or\n - setxkbmap\n\n@author shadowprince\n@license Eclipse Public License\n\"\"\"\n\nfrom subprocess import check_output\nfrom time import time\nimport shlex\nimport re\n\n# colors of layouts, check 
your command's output to match keys\nLANG_COLORS = {\n 'fr': '#268BD2', # solarized blue\n 'ru': '#F75252', # red\n 'ua': '#FCE94F', # yellow\n 'us': '#729FCF', # light blue\n}\n\nLAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n\n\ndef xkblayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n return check_output(\n [\"xkblayout-state\", \"print\", \"%s\"]\n ).decode('utf-8')\n\n\ndef setxkbmap():\n \"\"\"\n check using setxkbmap >= 1.3.0\n\n Please read issue 33 for more information :\n https://github.com/ultrabug/py3status/pull/33\n \"\"\"\n out = check_output(shlex.split(\"setxkbmap -query\")).decode(\"utf-8\")\n\n return re.match(LAYOUT_RE, out).group(1)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n color = ''\n\n def __init__(self):\n \"\"\"\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n xkblayout()\n except:\n self.command = setxkbmap\n else:\n self.command = xkblayout\n\n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': ''\n }\n\n lang = self.command().strip()\n lang_color = self.color if self.color else LANG_COLORS.get(lang)\n\n response['full_text'] = lang or '??'\n if lang_color:\n response['color'] = lang_color\n\n return response\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n from time import sleep\n x = Py3status()\n config = {\n 'color_good': '#00FF00',\n 'color_bad': '#FF0000',\n }\n while True:\n print(x.keyboard_layout([], config))\n sleep(1)\n", "path": "py3status/modules/keyboard_layout.py"}]} | 1,130 | 208 |
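The renamed helper and the fallback selection, pulled out of the patched module into a self-contained snippet. Running it still assumes that either the `xkblayout-state` binary or `setxkbmap` >= 1.3.0 is installed:

```python
import re
import shlex
from subprocess import check_output

LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)


def xkblayout():
    """Preferred method: requires the xkblayout-state binary."""
    return check_output(["xkblayout-state", "print", "%s"]).decode("utf-8")


def setxkbmap():
    """Fallback for setxkbmap >= 1.3.0."""
    out = check_output(shlex.split("setxkbmap -query")).decode("utf-8")
    return re.match(LAYOUT_RE, out).group(1)


try:
    xkblayout()          # spelled "xkb" throughout, matching the binary name
    command = xkblayout
except Exception:
    command = setxkbmap

print(command().strip())  # e.g. 'us'
```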
gh_patches_debug_62154 | rasdani/github-patches | git_diff | Parsl__parsl-258 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`globals` should be an optional config field
Running over a config without `globals`, I see:
```
def make_rundir(config=None, path=None):
"""When a path has not been specified, make the run directory.
Creates a rundir with the following hierarchy:
./runinfo <- Home of all run directories
|----000
|----001 <- Directories for each run
| ....
|----NNN
Kwargs:
- path (str): String path to a specific run dir
Default : None.
"""
try:
if not path:
path = None
> elif config["globals"].get('runDir', None):
E KeyError: 'globals'
../dataflow/rundirs.py:25: KeyError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/dataflow/rundirs.py`
Content:
```
1 import os
2 from glob import glob
3 import logging
4
5 logger = logging.getLogger(__name__)
6
7
8 def make_rundir(config=None, path=None):
9 """When a path has not been specified, make the run directory.
10
11 Creates a rundir with the following hierarchy:
12 ./runinfo <- Home of all run directories
13 |----000
14 |----001 <- Directories for each run
15 | ....
16 |----NNN
17
18 Kwargs:
19 - path (str): String path to a specific run dir
20 Default : None.
21 """
22 try:
23 if not path:
24 path = None
25 elif config["globals"].get('runDir', None):
26 path = config["globals"]['runDir']
27
28 if not path:
29 path = "./runinfo"
30
31 if not os.path.exists(path):
32 os.makedirs(path)
33
34 prev_rundirs = glob(os.path.join(path, "[0-9]*"))
35
36 current_rundir = os.path.join(path, '000')
37
38 if prev_rundirs:
39 # Since we globbed on files named as 0-9
40 x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]
41 current_rundir = os.path.join(path, '{0:03}'.format(x + 1))
42
43 os.makedirs(current_rundir)
44 logger.debug("Parsl run initializing in rundir:{0}".format(current_rundir))
45 return os.path.abspath(current_rundir)
46
47 except Exception as e:
48 logger.error("Failed to create a run directory")
49 logger.error("Error: {0}".format(e))
50 exit(-1)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py
--- a/parsl/dataflow/rundirs.py
+++ b/parsl/dataflow/rundirs.py
@@ -22,7 +22,7 @@
try:
if not path:
path = None
- elif config["globals"].get('runDir', None):
+ elif config.get("globals", {}).get('runDir'):
path = config["globals"]['runDir']
if not path:
| {"golden_diff": "diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py\n--- a/parsl/dataflow/rundirs.py\n+++ b/parsl/dataflow/rundirs.py\n@@ -22,7 +22,7 @@\n try:\n if not path:\n path = None\n- elif config[\"globals\"].get('runDir', None):\n+ elif config.get(\"globals\", {}).get('runDir'):\n path = config[\"globals\"]['runDir']\n \n if not path:\n", "issue": "`globals` should be an optional config field\nRunning over a config without `globals`, I see:\r\n```\r\n def make_rundir(config=None, path=None):\r\n \"\"\"When a path has not been specified, make the run directory.\r\n\r\n Creates a rundir with the following hierarchy:\r\n ./runinfo <- Home of all run directories\r\n |----000\r\n |----001 <- Directories for each run\r\n | ....\r\n |----NNN\r\n\r\n Kwargs:\r\n - path (str): String path to a specific run dir\r\n Default : None.\r\n \"\"\"\r\n try:\r\n if not path:\r\n path = None\r\n> elif config[\"globals\"].get('runDir', None):\r\nE KeyError: 'globals'\r\n\r\n../dataflow/rundirs.py:25: KeyError\r\n```\n", "before_files": [{"content": "import os\nfrom glob import glob\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_rundir(config=None, path=None):\n \"\"\"When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.\n \"\"\"\n try:\n if not path:\n path = None\n elif config[\"globals\"].get('runDir', None):\n path = config[\"globals\"]['runDir']\n\n if not path:\n path = \"./runinfo\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n prev_rundirs = glob(os.path.join(path, \"[0-9]*\"))\n\n current_rundir = os.path.join(path, '000')\n\n if prev_rundirs:\n # Since we globbed on files named as 0-9\n x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]\n current_rundir = os.path.join(path, '{0:03}'.format(x + 1))\n\n os.makedirs(current_rundir)\n logger.debug(\"Parsl run initializing in rundir:{0}\".format(current_rundir))\n return os.path.abspath(current_rundir)\n\n except Exception as e:\n logger.error(\"Failed to create a run directory\")\n logger.error(\"Error: {0}\".format(e))\n exit(-1)\n", "path": "parsl/dataflow/rundirs.py"}], "after_files": [{"content": "import os\nfrom glob import glob\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_rundir(config=None, path=None):\n \"\"\"When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.\n \"\"\"\n try:\n if not path:\n path = None\n elif config.get(\"globals\", {}).get('runDir'):\n path = config[\"globals\"]['runDir']\n\n if not path:\n path = \"./runinfo\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n prev_rundirs = glob(os.path.join(path, \"[0-9]*\"))\n\n current_rundir = os.path.join(path, '000')\n\n if prev_rundirs:\n # Since we globbed on files named as 0-9\n x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]\n current_rundir = os.path.join(path, '{0:03}'.format(x + 1))\n\n os.makedirs(current_rundir)\n logger.debug(\"Parsl run initializing in rundir:{0}\".format(current_rundir))\n return os.path.abspath(current_rundir)\n\n except Exception as e:\n 
logger.error(\"Failed to create a run directory\")\n logger.error(\"Error: {0}\".format(e))\n exit(-1)\n", "path": "parsl/dataflow/rundirs.py"}]} | 898 | 118 |
gh_patches_debug_15618 | rasdani/github-patches | git_diff | opsdroid__opsdroid-930 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add time to crontab log message
When the cron parser is triggered it emits a debug log saying `Running crontab skills`.
It would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.
The line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/parsers/crontab.py`
Content:
```
1 """A helper function for parsing and executing crontab skills."""
2
3 import asyncio
4 import logging
5
6 import arrow
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 while opsdroid.eventloop.is_running():
16 await asyncio.sleep(60 - arrow.now().time().second)
17 _LOGGER.debug(_("Running crontab skills"))
18 for skill in opsdroid.skills:
19 for matcher in skill.matchers:
20 if "crontab" in matcher:
21 if matcher["timezone"] is not None:
22 timezone = matcher["timezone"]
23 else:
24 timezone = opsdroid.config.get("timezone", "UTC")
25 if pycron.is_now(matcher["crontab"],
26 arrow.now(tz=timezone)):
27 await opsdroid.run_skill(skill,
28 skill.config,
29 None)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,5 +1,5 @@
"""A helper function for parsing and executing crontab skills."""
-
+import time
import asyncio
import logging
@@ -14,7 +14,7 @@
"""Parse all crontab skills against the current time."""
while opsdroid.eventloop.is_running():
await asyncio.sleep(60 - arrow.now().time().second)
- _LOGGER.debug(_("Running crontab skills"))
+ _LOGGER.debug(_("Running crontab skills at %s "), time.asctime())
for skill in opsdroid.skills:
for matcher in skill.matchers:
if "crontab" in matcher:
| {"golden_diff": "diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,5 +1,5 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n-\n+import time\n import asyncio\n import logging\n \n@@ -14,7 +14,7 @@\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n- _LOGGER.debug(_(\"Running crontab skills\"))\n+ _LOGGER.debug(_(\"Running crontab skills at %s \"), time.asctime())\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n", "issue": "Add time to crontab log message\nWhen the cron parser is triggered it emits a debug log saying `Running crontab skills`.\r\n\r\nIt would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.\r\n\r\nThe line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17). \n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(_(\"Running crontab skills\"))\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n if matcher[\"timezone\"] is not None:\n timezone = matcher[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(matcher[\"crontab\"],\n arrow.now(tz=timezone)):\n await opsdroid.run_skill(skill,\n skill.config,\n None)\n", "path": "opsdroid/parsers/crontab.py"}], "after_files": [{"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\nimport time\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(_(\"Running crontab skills at %s \"), time.asctime())\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n if matcher[\"timezone\"] is not None:\n timezone = matcher[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(matcher[\"crontab\"],\n arrow.now(tz=timezone)):\n await opsdroid.run_skill(skill,\n skill.config,\n None)\n", "path": "opsdroid/parsers/crontab.py"}]} | 626 | 194 |
gh_patches_debug_8828 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Just published jobs with global limits get prematurely completed due to missing metrics.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/management/commands/update_jobs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from django.contrib.auth import get_user_model
4 from django.core.management.base import BaseCommand
5 from django.db import transaction
6 from django.db.models import F, Q
7
8 from snippets.base.models import Job
9
10
11 class Command(BaseCommand):
12 args = "(no args)"
13 help = "Update Jobs"
14
15 @transaction.atomic
16 def handle(self, *args, **options):
17 now = datetime.utcnow()
18 user = get_user_model().objects.get_or_create(username='snippets_bot')[0]
19 count_total_completed = 0
20
21 # Publish Scheduled Jobs with `publish_start` before now or without
22 # publish_start.
23 jobs = Job.objects.filter(status=Job.SCHEDULED).filter(
24 Q(publish_start__lte=now) | Q(publish_start=None)
25 )
26 count_published = jobs.count()
27 for job in jobs:
28 job.change_status(
29 status=Job.PUBLISHED,
30 user=user,
31 reason='Published start date reached.',
32 )
33
34 # Disable Published Jobs with `publish_end` before now.
35 jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)
36 count_publication_end = jobs.count()
37 count_total_completed += count_publication_end
38
39 for job in jobs:
40 job.change_status(
41 status=Job.COMPLETED,
42 user=user,
43 reason='Publication end date reached.',
44 )
45
46 # Disable Jobs that reached Impression, Click or Block limits.
47 count_limit = {}
48 for limit in ['impressions', 'clicks', 'blocks']:
49 jobs = (Job.objects
50 .filter(status=Job.PUBLISHED)
51 .exclude(**{f'limit_{limit}': 0})
52 .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))
53 for job in jobs:
54 job.change_status(
55 status=Job.COMPLETED,
56 user=user,
57 reason=f'Limit reached: {limit}.',
58 )
59
60 count_limit[limit] = jobs.count()
61 count_total_completed += count_limit[limit]
62
63 # Disable Jobs that have Impression, Click or Block limits but don't
64 # have metrics data for at least 24h. This is to handle cases where the
65 # Metrics Pipeline is broken.
66 yesterday = datetime.utcnow() - timedelta(days=1)
67 jobs = (Job.objects
68 .filter(status=Job.PUBLISHED)
69 .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
70 .filter(metric_last_update__lt=yesterday))
71 for job in jobs:
72 job.change_status(
73 status=Job.COMPLETED,
74 user=user,
75 reason=f'Premature termination due to missing metrics.',
76 )
77 count_premature_termination = jobs.count()
78 count_total_completed += count_premature_termination
79
80 count_running = Job.objects.filter(status=Job.PUBLISHED).count()
81
82 self.stdout.write(
83 f'Jobs Published: {count_published}\n'
84 f'Jobs Completed: {count_total_completed}\n'
85 f' - Reached Publication End Date: {count_publication_end}\n'
86 f' - Reached Impressions Limit: {count_limit["impressions"]}\n'
87 f' - Reached Clicks Limit: {count_limit["clicks"]}\n'
88 f' - Reached Blocks Limit: {count_limit["blocks"]}\n'
89 f' - Premature Termination due to missing metrics: {count_premature_termination}\n'
90 f'Total Jobs Running: {count_running}\n'
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py
--- a/snippets/base/management/commands/update_jobs.py
+++ b/snippets/base/management/commands/update_jobs.py
@@ -67,6 +67,8 @@
jobs = (Job.objects
.filter(status=Job.PUBLISHED)
.exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
+ # Exclude Jobs with limits which haven't been updated once yet.
+ .exclude(metric_last_update='1970-01-01')
.filter(metric_last_update__lt=yesterday))
for job in jobs:
job.change_status(
| {"golden_diff": "diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py\n--- a/snippets/base/management/commands/update_jobs.py\n+++ b/snippets/base/management/commands/update_jobs.py\n@@ -67,6 +67,8 @@\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n+ # Exclude Jobs with limits which haven't been updated once yet.\n+ .exclude(metric_last_update='1970-01-01')\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n", "issue": "Just published jobs with global limits get prematurely completed due to missing metrics.\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. 
This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. 
This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n # Exclude Jobs with limits which haven't been updated once yet.\n .exclude(metric_last_update='1970-01-01')\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}]} | 1,228 | 157 |
gh_patches_debug_16891 | rasdani/github-patches | git_diff | medtagger__MedTagger-447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bump Python to 3.7.0
## Expected Behavior
MedTagger should always use the latest version of Python technologies and follow the rabbit instead of leaving technical debt :)
## Actual Behavior
We've got Python 3.6.x right now.
## Additional comment
Remember about Makefiles, Dockerfiles, TravisCI and more(?).
**WATCH OUT!** It's relatively new. Some of our dependencies may not work properly! Find out if `numpy` and other libs support it!
**BLOCKED BY:**
- [SimpleITK](https://github.com/SimpleITK/SimpleITK/releases) - next release (>1.1.0) will be fine to use with Python3.7.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/scripts/import_data.py`
Content:
```
1 """Script that will fill MedTagger with data.
2
3 How to use it?
4 --------------
5 At first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:
6 https://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).
7
8 Then, place these data (unzipped) anywhere on your computer and run this script by:
9
10 (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/
11
12 Please keep all scans with given structure:
13
14 |
15 `-- dir_with_scans
16 |-- 0a0c32c9e08cc2ea76a71649de56be6d
17 | |-- 0a67f9edb4915467ac16a565955898d3.dcm
18 | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm
19 | `-- ...
20 |-- 0a38e7597ca26f9374f8ea2770ba870d
21 | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm
22 | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm
23 | `-- ...
24 `-- ...
25
26 """
27 import os
28 import argparse
29 import glob
30 import logging
31 import logging.config
32
33 from medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository
34 from medtagger.workers.storage import parse_dicom_and_update_slice
35
36
37 logging.config.fileConfig('logging.conf')
38 logger = logging.getLogger(__name__)
39
40 parser = argparse.ArgumentParser(description='Import data to the MedTagger.')
41 parser.add_argument('--source', type=str, required=True, help='Source directory')
42 parser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')
43 args = parser.parse_args()
44
45
46 if __name__ == '__main__':
47 logger.info('Checking Dataset...')
48 dataset = DatasetsRepository.get_dataset_by_key(args.dataset)
49
50 source = args.source.rstrip('/')
51 for scan_directory in glob.iglob(source + '/*'):
52 if not os.path.isdir(scan_directory):
53 logger.warning('"%s" is not a directory. Skipping...', scan_directory)
54 continue
55
56 logger.info('Adding new Scan from "%s".', scan_directory)
57 slice_names = glob.glob(scan_directory + '/*.dcm')
58 number_of_slices = len(slice_names)
59 scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)
60
61 for slice_name in slice_names:
62 logger.info('Adding new Slice to Scan "%s" based on "%s".', scan.id, slice_name)
63 with open(slice_name, 'rb') as slice_dicom_file:
64 _slice = scan.add_slice()
65 image = slice_dicom_file.read()
66 parse_dicom_and_update_slice.delay(_slice.id, image)
67
```
Path: `backend/scripts/convert_dicoms_to_png.py`
Content:
```
1 """Script that will convert multiple dicoms to PNG format.
2
3 How to use it?
4 --------------
5 At first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:
6 https://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).
7
8 Then, place these data (unzipped) anywhere on your computer and run this script by:
9
10 (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
11
12 Name of the converted Dicom file is a position of the scan on the z axis.
13 """
14 import os
15 import argparse
16
17 import SimpleITK as sitk
18 from PIL import Image
19
20 from medtagger.conversion import convert_slice_to_normalized_8bit_array
21 from medtagger.definitions import DicomTag
22 from medtagger.dicoms import read_list
23
24
25 parser = argparse.ArgumentParser(description='Convert dicoms to png format.')
26 parser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')
27 parser.add_argument('--output', type=str, required=True,
28 help='Full path to directory where converted dicoms would be located')
29
30 args = parser.parse_args()
31 dicoms_folder_path = args.input
32 converted_dicoms_folder_path = args.output
33
34 dicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if
35 os.path.isfile(dicoms_folder_path + d)]
36 min_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))
37
38 if not os.path.exists(converted_dicoms_folder_path):
39 os.mkdir(converted_dicoms_folder_path)
40
41 for single_dicom in dicoms:
42 image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)
43 slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])
44 converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)
45 Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/scripts/convert_dicoms_to_png.py b/backend/scripts/convert_dicoms_to_png.py
--- a/backend/scripts/convert_dicoms_to_png.py
+++ b/backend/scripts/convert_dicoms_to_png.py
@@ -7,7 +7,7 @@
Then, place these data (unzipped) anywhere on your computer and run this script by:
- (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
+ (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
Name of the converted Dicom file is a position of the scan on the z axis.
"""
diff --git a/backend/scripts/import_data.py b/backend/scripts/import_data.py
--- a/backend/scripts/import_data.py
+++ b/backend/scripts/import_data.py
@@ -7,7 +7,7 @@
Then, place these data (unzipped) anywhere on your computer and run this script by:
- (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/
+ (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/
Please keep all scans with given structure:
| {"golden_diff": "diff --git a/backend/scripts/convert_dicoms_to_png.py b/backend/scripts/convert_dicoms_to_png.py\n--- a/backend/scripts/convert_dicoms_to_png.py\n+++ b/backend/scripts/convert_dicoms_to_png.py\n@@ -7,7 +7,7 @@\n \n Then, place these data (unzipped) anywhere on your computer and run this script by:\n \n- (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n+ (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n \n Name of the converted Dicom file is a position of the scan on the z axis.\n \"\"\"\ndiff --git a/backend/scripts/import_data.py b/backend/scripts/import_data.py\n--- a/backend/scripts/import_data.py\n+++ b/backend/scripts/import_data.py\n@@ -7,7 +7,7 @@\n \n Then, place these data (unzipped) anywhere on your computer and run this script by:\n \n- (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/\n+ (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/\n \n Please keep all scans with given structure:\n", "issue": "Bump Python to 3.7.0\n## Expected Behavior\r\n\r\nMedTagger should always use the latest version of Python technologies and follow the rabbit instead of leaving technical dept :)\r\n\r\n## Actual Behavior\r\n\r\nWe've got Python 3.6.x right now.\r\n\r\n## Additional comment\r\n\r\nRemember about Makefiles, Dockerfiles, TravisCI and more(?).\r\n\r\n**WATCH OUT!** It's relatively new. Some of our dependencies may not work properly! Find out if `numpy` and other libs supports it!\r\n\r\n**BLOCKED BY:**\r\n- [SimpleITK](https://github.com/SimpleITK/SimpleITK/releases) - next release (>1.1.0) will be fine to use with Python3.7.\n", "before_files": [{"content": "\"\"\"Script that will fill MedTagger with data.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/\n\nPlease keep all scans with given structure:\n\n |\n `-- dir_with_scans\n |-- 0a0c32c9e08cc2ea76a71649de56be6d\n | |-- 0a67f9edb4915467ac16a565955898d3.dcm\n | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm\n | `-- ...\n |-- 0a38e7597ca26f9374f8ea2770ba870d\n | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm\n | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm\n | `-- ...\n `-- ...\n\n\"\"\"\nimport os\nimport argparse\nimport glob\nimport logging\nimport logging.config\n\nfrom medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository\nfrom medtagger.workers.storage import parse_dicom_and_update_slice\n\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='Import data to the MedTagger.')\nparser.add_argument('--source', type=str, required=True, help='Source directory')\nparser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n logger.info('Checking Dataset...')\n dataset = DatasetsRepository.get_dataset_by_key(args.dataset)\n\n source = args.source.rstrip('/')\n for scan_directory in glob.iglob(source + '/*'):\n if not os.path.isdir(scan_directory):\n logger.warning('\"%s\" is not a directory. 
Skipping...', scan_directory)\n continue\n\n logger.info('Adding new Scan from \"%s\".', scan_directory)\n slice_names = glob.glob(scan_directory + '/*.dcm')\n number_of_slices = len(slice_names)\n scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)\n\n for slice_name in slice_names:\n logger.info('Adding new Slice to Scan \"%s\" based on \"%s\".', scan.id, slice_name)\n with open(slice_name, 'rb') as slice_dicom_file:\n _slice = scan.add_slice()\n image = slice_dicom_file.read()\n parse_dicom_and_update_slice.delay(_slice.id, image)\n", "path": "backend/scripts/import_data.py"}, {"content": "\"\"\"Script that will convert multiple dicoms to PNG format.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n\nName of the converted Dicom file is a position of the scan on the z axis.\n\"\"\"\nimport os\nimport argparse\n\nimport SimpleITK as sitk\nfrom PIL import Image\n\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array\nfrom medtagger.definitions import DicomTag\nfrom medtagger.dicoms import read_list\n\n\nparser = argparse.ArgumentParser(description='Convert dicoms to png format.')\nparser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')\nparser.add_argument('--output', type=str, required=True,\n help='Full path to directory where converted dicoms would be located')\n\nargs = parser.parse_args()\ndicoms_folder_path = args.input\nconverted_dicoms_folder_path = args.output\n\ndicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if\n os.path.isfile(dicoms_folder_path + d)]\nmin_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))\n\nif not os.path.exists(converted_dicoms_folder_path):\n os.mkdir(converted_dicoms_folder_path)\n\nfor single_dicom in dicoms:\n image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)\n slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])\n converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)\n Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')\n", "path": "backend/scripts/convert_dicoms_to_png.py"}], "after_files": [{"content": "\"\"\"Script that will fill MedTagger with data.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. 
You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/\n\nPlease keep all scans with given structure:\n\n |\n `-- dir_with_scans\n |-- 0a0c32c9e08cc2ea76a71649de56be6d\n | |-- 0a67f9edb4915467ac16a565955898d3.dcm\n | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm\n | `-- ...\n |-- 0a38e7597ca26f9374f8ea2770ba870d\n | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm\n | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm\n | `-- ...\n `-- ...\n\n\"\"\"\nimport os\nimport argparse\nimport glob\nimport logging\nimport logging.config\n\nfrom medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository\nfrom medtagger.workers.storage import parse_dicom_and_update_slice\n\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='Import data to the MedTagger.')\nparser.add_argument('--source', type=str, required=True, help='Source directory')\nparser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n logger.info('Checking Dataset...')\n dataset = DatasetsRepository.get_dataset_by_key(args.dataset)\n\n source = args.source.rstrip('/')\n for scan_directory in glob.iglob(source + '/*'):\n if not os.path.isdir(scan_directory):\n logger.warning('\"%s\" is not a directory. Skipping...', scan_directory)\n continue\n\n logger.info('Adding new Scan from \"%s\".', scan_directory)\n slice_names = glob.glob(scan_directory + '/*.dcm')\n number_of_slices = len(slice_names)\n scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)\n\n for slice_name in slice_names:\n logger.info('Adding new Slice to Scan \"%s\" based on \"%s\".', scan.id, slice_name)\n with open(slice_name, 'rb') as slice_dicom_file:\n _slice = scan.add_slice()\n image = slice_dicom_file.read()\n parse_dicom_and_update_slice.delay(_slice.id, image)\n", "path": "backend/scripts/import_data.py"}, {"content": "\"\"\"Script that will convert multiple dicoms to PNG format.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. 
You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n\nName of the converted Dicom file is a position of the scan on the z axis.\n\"\"\"\nimport os\nimport argparse\n\nimport SimpleITK as sitk\nfrom PIL import Image\n\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array\nfrom medtagger.definitions import DicomTag\nfrom medtagger.dicoms import read_list\n\n\nparser = argparse.ArgumentParser(description='Convert dicoms to png format.')\nparser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')\nparser.add_argument('--output', type=str, required=True,\n help='Full path to directory where converted dicoms would be located')\n\nargs = parser.parse_args()\ndicoms_folder_path = args.input\nconverted_dicoms_folder_path = args.output\n\ndicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if\n os.path.isfile(dicoms_folder_path + d)]\nmin_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))\n\nif not os.path.exists(converted_dicoms_folder_path):\n os.mkdir(converted_dicoms_folder_path)\n\nfor single_dicom in dicoms:\n image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)\n slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])\n converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)\n Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')\n", "path": "backend/scripts/convert_dicoms_to_png.py"}]} | 1,840 | 291 |
gh_patches_debug_8533 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cherrypy >= 6.1.0 fails tests
From the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):
```
6.1.0
-----
* Combined wsgiserver2 and wsgiserver3 modules into a
single module, ``cherrypy.wsgiserver``.
```
--- END ISSUE ---
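For orientation (an editorial note, not part of the quoted issue): the hook shown below collects the submodules of `cherrypy.wsgiserver`, which older CherryPy releases shipped as separate `wsgiserver2`/`wsgiserver3` files. A minimal probe of that layout change, assuming only that some version of CherryPy is installed:
```python
# Sketch only: checks whether the old split wsgiserver2 submodule is still
# importable, without executing its code.
import importlib.util

try:
    spec = importlib.util.find_spec("cherrypy.wsgiserver.wsgiserver2")
except ModuleNotFoundError:
    # Raised when cherrypy.wsgiserver is no longer a package at all.
    spec = None
print("pre-6.1 split layout:", spec is not None)
```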
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-cherrypy.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 #
10 # CherryPy is a minimalist Python web framework.
11 #
12 # http://www.cherrypy.org/
13 #
14 # Tested with CherryPy 5.0.1
15
16
17 from PyInstaller.utils.hooks import collect_submodules
18
19
20 hiddenimports = collect_submodules('cherrypy.wsgiserver')
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py
deleted file mode 100644
--- a/PyInstaller/hooks/hook-cherrypy.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015-2016, PyInstaller Development Team.
-#
-# Distributed under the terms of the GNU General Public License with exception
-# for distributing bootloader.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-#
-# CherryPy is a minimalist Python web framework.
-#
-# http://www.cherrypy.org/
-#
-# Tested with CherryPy 5.0.1
-
-
-from PyInstaller.utils.hooks import collect_submodules
-
-
-hiddenimports = collect_submodules('cherrypy.wsgiserver')
\ No newline at end of file
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/hook-cherrypy.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2015-2016, PyInstaller Development Team.\n-#\n-# Distributed under the terms of the GNU General Public License with exception\n-# for distributing bootloader.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n-#\n-# CherryPy is a minimalist Python web framework.\n-#\n-# http://www.cherrypy.org/\n-#\n-# Tested with CherryPy 5.0.1\n-\n-\n-from PyInstaller.utils.hooks import collect_submodules\n-\n-\n-hiddenimports = collect_submodules('cherrypy.wsgiserver')\n\\ No newline at end of file\n", "issue": "cherrypy >= 6.1.0 fails tests\nFrom the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):\n\n```\n6.1.0\n-----\n\n* Combined wsgiserver2 and wsgiserver3 modules into a\n single module, ``cherrypy.wsgiserver``.\n```\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n#\n# CherryPy is a minimalist Python web framework.\n#\n# http://www.cherrypy.org/\n#\n# Tested with CherryPy 5.0.1\n\n\nfrom PyInstaller.utils.hooks import collect_submodules\n\n\nhiddenimports = collect_submodules('cherrypy.wsgiserver')", "path": "PyInstaller/hooks/hook-cherrypy.py"}], "after_files": [{"content": null, "path": "PyInstaller/hooks/hook-cherrypy.py"}]} | 502 | 214 |
gh_patches_debug_25371 | rasdani/github-patches | git_diff | vispy__vispy-463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug when running Vispy offline for the first time
There appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash; it requires some testing...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/util/fonts/_freetype.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6
7 # Use freetype to get glyph bitmaps
8
9 import sys
10 import numpy as np
11
12 from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
13 FT_LOAD_NO_AUTOHINT, Face)
14
15
16 # Convert face to filename
17 from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
18 if sys.platform.startswith('linux'):
19 from ...ext.fontconfig import find_font
20 elif sys.platform.startswith('win'):
21 from ._win32 import find_font # noqa, analysis:ignore
22 else:
23 raise NotImplementedError
24
25 _font_dict = {}
26
27
28 def _load_font(face, bold, italic):
29 key = '%s-%s-%s' % (face, bold, italic)
30 if key in _font_dict:
31 return _font_dict[key]
32 if face in _vispy_fonts:
33 fname = _get_vispy_font_filename(face, bold, italic)
34 else:
35 fname = find_font(face, bold, italic)
36 font = Face(fname)
37 _font_dict[key] = font
38 return font
39
40
41 def _load_glyph(f, char, glyphs_dict):
42 """Load glyph from font into dict"""
43 flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
44 face = _load_font(f['face'], f['bold'], f['italic'])
45 face.set_char_size(f['size'] * 64)
46 # get the character of interest
47 face.load_char(char, flags)
48 bitmap = face.glyph.bitmap
49 width = face.glyph.bitmap.width
50 height = face.glyph.bitmap.rows
51 bitmap = np.array(bitmap.buffer)
52 w0 = bitmap.size // height if bitmap.size > 0 else 0
53 bitmap.shape = (height, w0)
54 bitmap = bitmap[:, :width].astype(np.ubyte)
55
56 left = face.glyph.bitmap_left
57 top = face.glyph.bitmap_top
58 advance = face.glyph.advance.x / 64.
59 glyph = dict(char=char, offset=(left, top), bitmap=bitmap,
60 advance=advance, kerning={})
61 glyphs_dict[char] = glyph
62 # Generate kerning
63 for other_char, other_glyph in glyphs_dict.items():
64 kerning = face.get_kerning(other_char, char)
65 glyph['kerning'][other_char] = kerning.x / 64.
66 kerning = face.get_kerning(char, other_char)
67 other_glyph['kerning'][char] = kerning.x / 64.
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py
--- a/vispy/util/fonts/_freetype.py
+++ b/vispy/util/fonts/_freetype.py
@@ -9,9 +9,6 @@
import sys
import numpy as np
-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
- FT_LOAD_NO_AUTOHINT, Face)
-
# Convert face to filename
from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
@@ -25,7 +22,11 @@
_font_dict = {}
+# Nest freetype imports in case someone doesn't have freetype on their system
+# and isn't using fonts (Windows)
+
def _load_font(face, bold, italic):
+ from ...ext.freetype import Face
key = '%s-%s-%s' % (face, bold, italic)
if key in _font_dict:
return _font_dict[key]
@@ -40,6 +41,8 @@
def _load_glyph(f, char, glyphs_dict):
"""Load glyph from font into dict"""
+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
+ FT_LOAD_NO_AUTOHINT)
flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
face = _load_font(f['face'], f['bold'], f['italic'])
face.set_char_size(f['size'] * 64)
| {"golden_diff": "diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py\n--- a/vispy/util/fonts/_freetype.py\n+++ b/vispy/util/fonts/_freetype.py\n@@ -9,9 +9,6 @@\n import sys\n import numpy as np\n \n-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n- FT_LOAD_NO_AUTOHINT, Face)\n-\n \n # Convert face to filename\n from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\n@@ -25,7 +22,11 @@\n _font_dict = {}\n \n \n+# Nest freetype imports in case someone doesn't have freetype on their system\n+# and isn't using fonts (Windows)\n+\n def _load_font(face, bold, italic):\n+ from ...ext.freetype import Face\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n@@ -40,6 +41,8 @@\n \n def _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n+ FT_LOAD_NO_AUTOHINT)\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n", "issue": "Bug when running Vispy offline for the first time\nThere appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash, require some testing...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n# Use freetype to get glyph bitmaps\n\nimport sys\nimport numpy as np\n\nfrom ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n FT_LOAD_NO_AUTOHINT, Face)\n\n\n# Convert face to filename\nfrom ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\nif sys.platform.startswith('linux'):\n from ...ext.fontconfig import find_font\nelif sys.platform.startswith('win'):\n from ._win32 import find_font # noqa, analysis:ignore\nelse:\n raise NotImplementedError\n\n_font_dict = {}\n\n\ndef _load_font(face, bold, italic):\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n if face in _vispy_fonts:\n fname = _get_vispy_font_filename(face, bold, italic)\n else:\n fname = find_font(face, bold, italic)\n font = Face(fname)\n _font_dict[key] = font\n return font\n\n\ndef _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n # get the character of interest\n face.load_char(char, flags)\n bitmap = face.glyph.bitmap\n width = face.glyph.bitmap.width\n height = face.glyph.bitmap.rows\n bitmap = np.array(bitmap.buffer)\n w0 = bitmap.size // height if bitmap.size > 0 else 0\n bitmap.shape = (height, w0)\n bitmap = bitmap[:, :width].astype(np.ubyte)\n\n left = face.glyph.bitmap_left\n top = face.glyph.bitmap_top\n advance = face.glyph.advance.x / 64.\n glyph = dict(char=char, offset=(left, top), bitmap=bitmap,\n advance=advance, kerning={})\n glyphs_dict[char] = glyph\n # Generate kerning\n for other_char, other_glyph in glyphs_dict.items():\n kerning = face.get_kerning(other_char, char)\n glyph['kerning'][other_char] = kerning.x / 64.\n kerning = 
face.get_kerning(char, other_char)\n other_glyph['kerning'][char] = kerning.x / 64.\n", "path": "vispy/util/fonts/_freetype.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n# Use freetype to get glyph bitmaps\n\nimport sys\nimport numpy as np\n\n\n# Convert face to filename\nfrom ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\nif sys.platform.startswith('linux'):\n from ...ext.fontconfig import find_font\nelif sys.platform.startswith('win'):\n from ._win32 import find_font # noqa, analysis:ignore\nelse:\n raise NotImplementedError\n\n_font_dict = {}\n\n\n# Nest freetype imports in case someone doesn't have freetype on their system\n# and isn't using fonts (Windows)\n\ndef _load_font(face, bold, italic):\n from ...ext.freetype import Face\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n if face in _vispy_fonts:\n fname = _get_vispy_font_filename(face, bold, italic)\n else:\n fname = find_font(face, bold, italic)\n font = Face(fname)\n _font_dict[key] = font\n return font\n\n\ndef _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n FT_LOAD_NO_AUTOHINT)\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n # get the character of interest\n face.load_char(char, flags)\n bitmap = face.glyph.bitmap\n width = face.glyph.bitmap.width\n height = face.glyph.bitmap.rows\n bitmap = np.array(bitmap.buffer)\n w0 = bitmap.size // height if bitmap.size > 0 else 0\n bitmap.shape = (height, w0)\n bitmap = bitmap[:, :width].astype(np.ubyte)\n\n left = face.glyph.bitmap_left\n top = face.glyph.bitmap_top\n advance = face.glyph.advance.x / 64.\n glyph = dict(char=char, offset=(left, top), bitmap=bitmap,\n advance=advance, kerning={})\n glyphs_dict[char] = glyph\n # Generate kerning\n for other_char, other_glyph in glyphs_dict.items():\n kerning = face.get_kerning(other_char, char)\n glyph['kerning'][other_char] = kerning.x / 64.\n kerning = face.get_kerning(char, other_char)\n other_glyph['kerning'][char] = kerning.x / 64.\n", "path": "vispy/util/fonts/_freetype.py"}]} | 1,036 | 339 |
gh_patches_debug_8387 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
apm-agent-python and structlog - mapper_parsing_exception for `event.dataset`
**Describe the bug**: When using `structlog` with `elasticapm` and the log processor `elasticapm.structlog_processor`, we have recently seen Logstash refuse our log events with the following error:
```
[2021-10-05T12:10:10,746][WARN ][logstash.outputs.elasticsearch][main][a2a92c7c0ddf765b1969e7e8d4a302b6deca976af4c80a2d9706ccdf2486267b] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2021.10.05", :routing=>nil}, {"stage_environment"=>"prod", "service.name"=>"flow", "company"=>"<PayerCompany: Hidden Company Name [Parent Company: Hidden Company Name [CRN: DKXXXXXXXX]] [CRN: DKXXXXXXXX]>", "@version"=>"1", "host"=>"167.71.1.240", "sentry"=>"skipped", "timestamp"=>"2021-10-05T12:10:00.483890Z", "logger"=>"account_service.models", "event.dataset"=>"flow", "event"=>"PayerCompany change state request", "level"=>"debug", "port"=>58652, "new"=>"APPROVED", "override"=>false, "@timestamp"=>2021-10-05T12:10:10.462Z, "old"=>"APPROVED", "modline"=>"account_service.models:159"}], :response=>{"index"=>{"_index"=>"logstash-2021.10.05", "_type"=>"_doc", "_id"=>"ST1cUHwBFM723LU2e_JV", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"Could not dynamically add mapping for field [event.dataset]. Existing mapping for [event] must be of type object but found [text]."}}}}
```
We setup our structlog like this in the Django settings file:
```python
processors = [
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
log_processors.add_module_and_lineno,
log_processors.normalize_datatypes,
log_processors.attach_environment,
log_processors.timestamper,
structlog_processor, # this is the processor that seems to cause the error
SentryJsonProcessor(level=logging.ERROR, tag_keys="__all__"),
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
# Structlog
structlog.configure(
processors=processors,
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
```
If we remove `structlog_processor` all loggers are received by Logstash with no problems.
If we write a small custom log processor that strips the `event.dataset` field (e.g. `del event_dict["event.dataset"]`), it also works fine again.
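A minimal sketch of such a processor (the function name is ours and purely illustrative; it just follows the standard structlog processor signature and would sit in `processors` after `structlog_processor`):

```python
def drop_event_dataset(logger, method_name, event_dict):
    # Remove the dotted key so Elasticsearch never sees 'event.dataset'
    # alongside the plain-text 'event' field.
    event_dict.pop("event.dataset", None)
    return event_dict
```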
**To Reproduce**
1. Set up structlog
2. Add `elasticapm.structlog_processor` to the list of structlog processors
3. Emit a log message (e.g. `logger.info("test")`).
**Environment (please complete the following information)**
- OS: Debian (slim)
- Python version: 3.9.7
- Framework and version [e.g. Django 2.1]: 3.2.8
- APM Server version: 7.15
- Agent version: N/A
**Additional context**
We use `python-logstash-async` for delivery of the logs to the logstash server.
requirements.txt (excerpts)
```pip
elastic-apm==6.5.0
elasticsearch==7.15.0
django-structlog==2.1.3
python-logstash-async==2.3.0
structlog==21.1.0
structlog-sentry==1.4.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/handlers/structlog.py`
Content:
```
1 # Copyright (c) 2019, Elasticsearch BV
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are met:
6 #
7 # * Redistributions of source code must retain the above copyright notice, this
8 # list of conditions and the following disclaimer.
9 #
10 # * Redistributions in binary form must reproduce the above copyright notice,
11 # this list of conditions and the following disclaimer in the documentation
12 # and/or other materials provided with the distribution.
13 #
14 # * Neither the name of the copyright holder nor the names of its
15 # contributors may be used to endorse or promote products derived from
16 # this software without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
28
29 from __future__ import absolute_import
30
31 from elasticapm import get_client
32 from elasticapm.traces import execution_context
33
34
35 def structlog_processor(logger, method_name, event_dict):
36 """
37 Add three new entries to the event_dict for any processed events:
38
39 * transaction.id
40 * trace.id
41 * span.id
42
43 Only adds non-None IDs.
44
45 :param logger:
46 Unused (logger instance in structlog)
47 :param method_name:
48 Unused (wrapped method_name)
49 :param event_dict:
50 Event dictionary for the event we're processing
51 :return:
52 `event_dict`, with three new entries.
53 """
54 transaction = execution_context.get_transaction()
55 if transaction:
56 event_dict["transaction.id"] = transaction.id
57 client = get_client()
58 if client:
59 event_dict["service.name"] = client.config.service_name
60 event_dict["event.dataset"] = f"{client.config.service_name}"
61 if transaction and transaction.trace_parent:
62 event_dict["trace.id"] = transaction.trace_parent.trace_id
63 span = execution_context.get_span()
64 if span and span.id:
65 event_dict["span.id"] = span.id
66 return event_dict
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/handlers/structlog.py b/elasticapm/handlers/structlog.py
--- a/elasticapm/handlers/structlog.py
+++ b/elasticapm/handlers/structlog.py
@@ -57,7 +57,7 @@
client = get_client()
if client:
event_dict["service.name"] = client.config.service_name
- event_dict["event.dataset"] = f"{client.config.service_name}"
+ event_dict["event"] = {"dataset": f"{client.config.service_name}"}
if transaction and transaction.trace_parent:
event_dict["trace.id"] = transaction.trace_parent.trace_id
span = execution_context.get_span()
| {"golden_diff": "diff --git a/elasticapm/handlers/structlog.py b/elasticapm/handlers/structlog.py\n--- a/elasticapm/handlers/structlog.py\n+++ b/elasticapm/handlers/structlog.py\n@@ -57,7 +57,7 @@\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n- event_dict[\"event.dataset\"] = f\"{client.config.service_name}\"\n+ event_dict[\"event\"] = {\"dataset\": f\"{client.config.service_name}\"}\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n", "issue": "apm-agent-python and structlog - mapper_parsing_exception for `event.dataset` \n**Describe the bug**: When using `structlog` and with `elasticapm` and the log processer `elasticapm.structlog_processor`, we have recently seen Logstash refuse our logger with the following error: \r\n\r\n```\r\n[2021-10-05T12:10:10,746][WARN ][logstash.outputs.elasticsearch][main][a2a92c7c0ddf765b1969e7e8d4a302b6deca976af4c80a2d9706ccdf2486267b] Could not index event to Elasticsearch. {:status=>400, :action=>[\"index\", {:_id=>nil, :_index=>\"logstash-2021.10.05\", :routing=>nil}, {\"stage_environment\"=>\"prod\", \"service.name\"=>\"flow\", \"company\"=>\"<PayerCompany: Hidden Company Name [Parent Company: Hidden Company Name [CRN: DKXXXXXXXX]] [CRN: DKXXXXXXXX]>\", \"@version\"=>\"1\", \"host\"=>\"167.71.1.240\", \"sentry\"=>\"skipped\", \"timestamp\"=>\"2021-10-05T12:10:00.483890Z\", \"logger\"=>\"account_service.models\", \"event.dataset\"=>\"flow\", \"event\"=>\"PayerCompany change state request\", \"level\"=>\"debug\", \"port\"=>58652, \"new\"=>\"APPROVED\", \"override\"=>false, \"@timestamp\"=>2021-10-05T12:10:10.462Z, \"old\"=>\"APPROVED\", \"modline\"=>\"account_service.models:159\"}], :response=>{\"index\"=>{\"_index\"=>\"logstash-2021.10.05\", \"_type\"=>\"_doc\", \"_id\"=>\"ST1cUHwBFM723LU2e_JV\", \"status\"=>400, \"error\"=>{\"type\"=>\"mapper_parsing_exception\", \"reason\"=>\"Could not dynamically add mapping for field [event.dataset]. Existing mapping for [event] must be of type object but found [text].\"}}}}\r\n```\r\n\r\nWe setup our structlog like this in the Django settings file: \r\n\r\n```python\r\nprocessors = [\r\n structlog.stdlib.filter_by_level,\r\n structlog.stdlib.add_log_level,\r\n structlog.stdlib.add_logger_name,\r\n log_processors.add_module_and_lineno,\r\n log_processors.normalize_datatypes,\r\n log_processors.attach_environment,\r\n log_processors.timestamper,\r\n structlog_processor, # this is the processor that seems to cause the error\r\n SentryJsonProcessor(level=logging.ERROR, tag_keys=\"__all__\"),\r\n structlog.stdlib.PositionalArgumentsFormatter(),\r\n structlog.processors.StackInfoRenderer(),\r\n structlog.processors.format_exc_info,\r\n structlog.processors.UnicodeDecoder(),\r\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\r\n]\r\n\r\n\r\n# Structlog\r\nstructlog.configure(\r\n processors=processors,\r\n context_class=structlog.threadlocal.wrap_dict(dict),\r\n logger_factory=structlog.stdlib.LoggerFactory(),\r\n wrapper_class=structlog.stdlib.BoundLogger,\r\n cache_logger_on_first_use=True,\r\n)\r\n```\r\n\r\nIf we remove `structlog_processor` all loggers are received by Logstash with no problems. \r\n\r\nIf we write a small customer log processor that strips the `event.dataset` (e.g. `del event_dict[\"event.dataset\"]` it also works fine again. \r\n\r\n**To Reproduce**\r\n\r\n1. Setup structlog\r\n2. 
Add the elasticapm.structlog_processor to the list of processors for structlog\r\n3. Send a logger (e.g. `logger.info(\"test\")`). \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Debian (slim)\r\n- Python version: 3.9.7\r\n- Framework and version [e.g. Django 2.1]: 3.2.8\r\n- APM Server version: 7.15\r\n- Agent version: N/A\r\n\r\n\r\n**Additional context**\r\n\r\nWe use `python-logstash-async` for delivery of the logs to the logstash server. \r\n\r\nrequirements.txt (excerpts)\r\n```pip\r\nelastic-apm==6.5.0\r\nelasticsearch==7.15.0\r\ndjango-structlog==2.1.3\r\npython-logstash-async==2.3.0\r\nstructlog==21.1.0\r\nstructlog-sentry==1.4.0\r\n``` \n", "before_files": [{"content": "# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom elasticapm import get_client\nfrom elasticapm.traces import execution_context\n\n\ndef structlog_processor(logger, method_name, event_dict):\n \"\"\"\n Add three new entries to the event_dict for any processed events:\n\n * transaction.id\n * trace.id\n * span.id\n\n Only adds non-None IDs.\n\n :param logger:\n Unused (logger instance in structlog)\n :param method_name:\n Unused (wrapped method_name)\n :param event_dict:\n Event dictionary for the event we're processing\n :return:\n `event_dict`, with three new entries.\n \"\"\"\n transaction = execution_context.get_transaction()\n if transaction:\n event_dict[\"transaction.id\"] = transaction.id\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n event_dict[\"event.dataset\"] = f\"{client.config.service_name}\"\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n if span and span.id:\n event_dict[\"span.id\"] = span.id\n return event_dict\n", "path": "elasticapm/handlers/structlog.py"}], "after_files": [{"content": "# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted 
provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom elasticapm import get_client\nfrom elasticapm.traces import execution_context\n\n\ndef structlog_processor(logger, method_name, event_dict):\n \"\"\"\n Add three new entries to the event_dict for any processed events:\n\n * transaction.id\n * trace.id\n * span.id\n\n Only adds non-None IDs.\n\n :param logger:\n Unused (logger instance in structlog)\n :param method_name:\n Unused (wrapped method_name)\n :param event_dict:\n Event dictionary for the event we're processing\n :return:\n `event_dict`, with three new entries.\n \"\"\"\n transaction = execution_context.get_transaction()\n if transaction:\n event_dict[\"transaction.id\"] = transaction.id\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n event_dict[\"event\"] = {\"dataset\": f\"{client.config.service_name}\"}\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n if span and span.id:\n event_dict[\"span.id\"] = span.id\n return event_dict\n", "path": "elasticapm/handlers/structlog.py"}]} | 1,962 | 156 |
gh_patches_debug_16503 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1061 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dockerfile scan fails when in directory and used -f
**Describe the bug**
When running directory scan checkov shows Dockerfile failed checks. When scanning file no errors are shown.
**To Reproduce**
Create Dockerfile in directory `test` with content:
```
FROM debian:buster
ENV CHECKOV_VERSION 1.0.775
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get -y update && \
apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \
pip3 install -U checkov=="${CHECKOV_VERSION}"
```
`checkov -f test/Dockerfile` won't show errors
`checkov -d test` will show errors
**Expected behavior**
Show error in both cases.
**Screenshots**
<img width="892" alt="Screenshot 2021-04-10 at 09 39 21" src="https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png">
**Desktop (please complete the following information):**
- OS: MacOS 11.2.3
- Python: 3.9.4
- Checkov Version 2.0.27
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/runner.py`
Content:
```
1 import logging
2 import os
3 from dockerfile_parse.constants import DOCKERFILE_FILENAME
4
5 from checkov.common.output.record import Record
6 from checkov.common.output.report import Report
7 from checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories
8 from checkov.dockerfile.parser import parse, collect_skipped_checks
9 from checkov.dockerfile.registry import registry
10 from checkov.runner_filter import RunnerFilter
11
12 DOCKER_FILE_MASK = [DOCKERFILE_FILENAME]
13
14
15 class Runner(BaseRunner):
16 check_type = "dockerfile"
17
18 def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
19 collect_skip_comments=True):
20 report = Report(self.check_type)
21 definitions = {}
22 definitions_raw = {}
23 parsing_errors = {}
24 files_list = []
25 if external_checks_dir:
26 for directory in external_checks_dir:
27 registry.load_external_checks(directory)
28
29 if files:
30 for file in files:
31 if file in DOCKER_FILE_MASK:
32 (definitions[file], definitions_raw[file]) = parse(file)
33
34 if root_folder:
35 for root, d_names, f_names in os.walk(root_folder):
36 filter_ignored_directories(d_names)
37 for file in f_names:
38 if file in DOCKER_FILE_MASK:
39 files_list.append(os.path.join(root, file))
40
41 for file in files_list:
42 relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
43 try:
44 (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)
45 except TypeError:
46 logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')
47
48 for docker_file_path in definitions.keys():
49
50 # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
51 # or there will be no leading slash; root_folder will always be none.
52 # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
53 # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).
54 if docker_file_path[0] == '/':
55 path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path
56 else:
57 path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path
58
59 file_abs_path = os.path.abspath(path_to_convert)
60 skipped_checks = collect_skipped_checks(definitions[docker_file_path])
61 instructions = definitions[docker_file_path]
62
63 results = registry.scan(docker_file_path, instructions, skipped_checks,
64 runner_filter)
65 for check, check_result in results.items():
66 result_configuration = check_result['results_configuration']
67 startline = 0
68 endline = 0
69 result_instruction = ""
70 if result_configuration:
71 startline = result_configuration['startline']
72 endline = result_configuration['endline']
73 result_instruction = result_configuration["instruction"]
74
75 codeblock = []
76 self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)
77 record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
78 code_block=codeblock,
79 file_path=docker_file_path,
80 file_line_range=[startline,
81 endline],
82 resource="{}.{}".format(docker_file_path,
83 result_instruction,
84 startline),
85 evaluations=None, check_class=check.__class__.__module__,
86 file_abs_path=file_abs_path, entity_tags=None)
87 report.add_record(record=record)
88
89 return report
90
91
92 def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):
93 for line in range(startline, endline + 1):
94 codeblock.append((line, definitions_raw[docker_file_path][line]))
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py
--- a/checkov/dockerfile/runner.py
+++ b/checkov/dockerfile/runner.py
@@ -15,7 +15,7 @@
class Runner(BaseRunner):
check_type = "dockerfile"
- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
collect_skip_comments=True):
report = Report(self.check_type)
definitions = {}
@@ -28,7 +28,7 @@
if files:
for file in files:
- if file in DOCKER_FILE_MASK:
+ if os.path.basename(file) in DOCKER_FILE_MASK:
(definitions[file], definitions_raw[file]) = parse(file)
if root_folder:
| {"golden_diff": "diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py\n--- a/checkov/dockerfile/runner.py\n+++ b/checkov/dockerfile/runner.py\n@@ -15,7 +15,7 @@\n class Runner(BaseRunner):\n check_type = \"dockerfile\"\n \n- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n@@ -28,7 +28,7 @@\n \n if files:\n for file in files:\n- if file in DOCKER_FILE_MASK:\n+ if os.path.basename(file) in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n \n if root_folder:\n", "issue": "Dockerfile scan fails when in directory and used -f\n**Describe the bug**\r\nWhen running directory scan checkov shows Dockerfile failed checks. When scanning file no errors are shown.\r\n\r\n**To Reproduce**\r\nCreate Dockerfile in directory `test` with content:\r\n```\r\nFROM debian:buster\r\n\r\nENV CHECKOV_VERSION 1.0.775\r\n\r\nRUN export DEBIAN_FRONTEND=noninteractive && \\\r\n apt-get -y update && \\\r\n apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \\\r\n pip3 install -U checkov==\"${CHECKOV_VERSION}\"\r\n```\r\n\r\n`checkov -f test/Dockerfile` won't show errors\r\n`checkov -d test` will show error\r\n\r\n**Expected behavior**\r\nShow error in both cases.\r\n\r\n**Screenshots**\r\n<img width=\"892\" alt=\"Screenshot 2021-04-10 at 09 39 21\" src=\"https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png\">\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 11.2.3\r\n - Python: 3.9.4\r\n - Checkov Version 2.0.27\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom dockerfile_parse.constants import DOCKERFILE_FILENAME\n\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories\nfrom checkov.dockerfile.parser import parse, collect_skipped_checks\nfrom checkov.dockerfile.registry import registry\nfrom checkov.runner_filter import RunnerFilter\n\nDOCKER_FILE_MASK = [DOCKERFILE_FILENAME]\n\n\nclass Runner(BaseRunner):\n check_type = \"dockerfile\"\n\n def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n definitions_raw = {}\n parsing_errors = {}\n files_list = []\n if external_checks_dir:\n for directory in external_checks_dir:\n registry.load_external_checks(directory)\n\n if files:\n for file in files:\n if file in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_directories(d_names)\n for file in f_names:\n if file in DOCKER_FILE_MASK:\n files_list.append(os.path.join(root, file))\n\n for file in files_list:\n relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'\n try:\n (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)\n except TypeError:\n logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')\n\n for docker_file_path in definitions.keys():\n\n # There are a few cases here. 
If -f was used, there could be a leading / because it's an absolute path,\n # or there will be no leading slash; root_folder will always be none.\n # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).\n # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).\n if docker_file_path[0] == '/':\n path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path\n else:\n path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path\n\n file_abs_path = os.path.abspath(path_to_convert)\n skipped_checks = collect_skipped_checks(definitions[docker_file_path])\n instructions = definitions[docker_file_path]\n\n results = registry.scan(docker_file_path, instructions, skipped_checks,\n runner_filter)\n for check, check_result in results.items():\n result_configuration = check_result['results_configuration']\n startline = 0\n endline = 0\n result_instruction = \"\"\n if result_configuration:\n startline = result_configuration['startline']\n endline = result_configuration['endline']\n result_instruction = result_configuration[\"instruction\"]\n\n codeblock = []\n self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)\n record = Record(check_id=check.id, check_name=check.name, check_result=check_result,\n code_block=codeblock,\n file_path=docker_file_path,\n file_line_range=[startline,\n endline],\n resource=\"{}.{}\".format(docker_file_path,\n result_instruction,\n startline),\n evaluations=None, check_class=check.__class__.__module__,\n file_abs_path=file_abs_path, entity_tags=None)\n report.add_record(record=record)\n\n return report\n\n\n def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):\n for line in range(startline, endline + 1):\n codeblock.append((line, definitions_raw[docker_file_path][line]))\n", "path": "checkov/dockerfile/runner.py"}], "after_files": [{"content": "import logging\nimport os\nfrom dockerfile_parse.constants import DOCKERFILE_FILENAME\n\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories\nfrom checkov.dockerfile.parser import parse, collect_skipped_checks\nfrom checkov.dockerfile.registry import registry\nfrom checkov.runner_filter import RunnerFilter\n\nDOCKER_FILE_MASK = [DOCKERFILE_FILENAME]\n\n\nclass Runner(BaseRunner):\n check_type = \"dockerfile\"\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n definitions_raw = {}\n parsing_errors = {}\n files_list = []\n if external_checks_dir:\n for directory in external_checks_dir:\n registry.load_external_checks(directory)\n\n if files:\n for file in files:\n if os.path.basename(file) in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_directories(d_names)\n for file in f_names:\n if file in DOCKER_FILE_MASK:\n files_list.append(os.path.join(root, file))\n\n for file in files_list:\n relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'\n try:\n (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)\n except TypeError:\n 
logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')\n\n for docker_file_path in definitions.keys():\n\n # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,\n # or there will be no leading slash; root_folder will always be none.\n # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).\n # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).\n if docker_file_path[0] == '/':\n path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path\n else:\n path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path\n\n file_abs_path = os.path.abspath(path_to_convert)\n skipped_checks = collect_skipped_checks(definitions[docker_file_path])\n instructions = definitions[docker_file_path]\n\n results = registry.scan(docker_file_path, instructions, skipped_checks,\n runner_filter)\n for check, check_result in results.items():\n result_configuration = check_result['results_configuration']\n startline = 0\n endline = 0\n result_instruction = \"\"\n if result_configuration:\n startline = result_configuration['startline']\n endline = result_configuration['endline']\n result_instruction = result_configuration[\"instruction\"]\n\n codeblock = []\n self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)\n record = Record(check_id=check.id, check_name=check.name, check_result=check_result,\n code_block=codeblock,\n file_path=docker_file_path,\n file_line_range=[startline,\n endline],\n resource=\"{}.{}\".format(docker_file_path,\n result_instruction,\n startline),\n evaluations=None, check_class=check.__class__.__module__,\n file_abs_path=file_abs_path, entity_tags=None)\n report.add_record(record=record)\n\n return report\n\n\n def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):\n for line in range(startline, endline + 1):\n codeblock.append((line, definitions_raw[docker_file_path][line]))\n", "path": "checkov/dockerfile/runner.py"}]} | 1,610 | 197 |
gh_patches_debug_41075 | rasdani/github-patches | git_diff | vyperlang__vyper-828 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Solidity Compatible ERC20 broken
The [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/tokens/ERC20_solidity_compatible/ERC20.v.py`
Content:
```
1 # Solidity-Compatible EIP20/ERC20 Token
2 # Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
3 # Author: Phil Daian
4
5 # The use of the uint256 datatype as in this token is not
6 # recommended, as it can pose security risks.
7
8 # This token is intended as a proof of concept towards
9 # language interoperability and not for production use.
10
11 # Events issued by the contract
12 Transfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})
13 Approval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})
14
15 balances: uint256[address]
16 allowances: (uint256[address])[address]
17 num_issued: uint256
18
19 @public
20 @payable
21 def deposit():
22 _value: uint256 = convert(msg.value, 'uint256')
23 _sender: address = msg.sender
24 self.balances[_sender] = uint256_add(self.balances[_sender], _value)
25 self.num_issued = uint256_add(self.num_issued, _value)
26 # Fire deposit event as transfer from 0x0
27 log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
28
29 @public
30 def withdraw(_value : uint256) -> bool:
31 _sender: address = msg.sender
32 # Make sure sufficient funds are present, op will not underflow supply
33 # implicitly through overflow protection
34 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
35 self.num_issued = uint256_sub(self.num_issued, _value)
36 send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
37 # Fire withdraw event as transfer to 0x0
38 log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
39 return true
40
41 @public
42 @constant
43 def totalSupply() -> uint256:
44 return self.num_issued
45
46 @public
47 @constant
48 def balanceOf(_owner : address) -> uint256:
49 return self.balances[_owner]
50
51 @public
52 def transfer(_to : address, _value : uint256) -> bool:
53 _sender: address = msg.sender
54 # Make sure sufficient funds are present implicitly through overflow protection
55 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
56 self.balances[_to] = uint256_add(self.balances[_to], _value)
57 # Fire transfer event
58 log.Transfer(_sender, _to, _value)
59 return true
60
61 @public
62 def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
63 _sender: address = msg.sender
64 allowance: uint256 = self.allowances[_from][_sender]
65 # Make sure sufficient funds/allowance are present implicitly through overflow protection
66 self.balances[_from] = uint256_sub(self.balances[_from], _value)
67 self.balances[_to] = uint256_add(self.balances[_to], _value)
68 self.allowances[_from][_sender] = uint256_sub(allowance, _value)
69 # Fire transfer event
70 log.Transfer(_from, _to, _value)
71 return true
72
73 @public
74 def approve(_spender : address, _value : uint256) -> bool:
75 _sender: address = msg.sender
76 self.allowances[_sender][_spender] = _value
77 # Fire approval event
78 log.Approval(_sender, _spender, _value)
79 return true
80
81 @public
82 @constant
83 def allowance(_owner : address, _spender : address) -> uint256:
84 return self.allowances[_owner][_spender]
85
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
@@ -21,8 +21,8 @@
def deposit():
_value: uint256 = convert(msg.value, 'uint256')
_sender: address = msg.sender
- self.balances[_sender] = uint256_add(self.balances[_sender], _value)
- self.num_issued = uint256_add(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] + _value
+ self.num_issued = self.num_issued + _value
# Fire deposit event as transfer from 0x0
log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
@@ -31,12 +31,12 @@
_sender: address = msg.sender
# Make sure sufficient funds are present, op will not underflow supply
# implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.num_issued = uint256_sub(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.num_issued = self.num_issued - _value
send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
# Fire withdraw event as transfer to 0x0
log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
- return true
+ return True
@public
@constant
@@ -52,23 +52,23 @@
def transfer(_to : address, _value : uint256) -> bool:
_sender: address = msg.sender
# Make sure sufficient funds are present implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.balances[_to] = self.balances[_to] + _value
# Fire transfer event
log.Transfer(_sender, _to, _value)
- return true
+ return True
@public
def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
_sender: address = msg.sender
allowance: uint256 = self.allowances[_from][_sender]
# Make sure sufficient funds/allowance are present implicitly through overflow protection
- self.balances[_from] = uint256_sub(self.balances[_from], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
- self.allowances[_from][_sender] = uint256_sub(allowance, _value)
+ self.balances[_from] = self.balances[_from] - _value
+ self.balances[_to] = self.balances[_to] + _value
+ self.allowances[_from][_sender] = allowance - _value
# Fire transfer event
log.Transfer(_from, _to, _value)
- return true
+ return True
@public
def approve(_spender : address, _value : uint256) -> bool:
@@ -76,7 +76,7 @@
self.allowances[_sender][_spender] = _value
# Fire approval event
log.Approval(_sender, _spender, _value)
- return true
+ return True
@public
@constant
| {"golden_diff": "diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n@@ -21,8 +21,8 @@\n def deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n- self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n- self.num_issued = uint256_add(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] + _value\n+ self.num_issued = self.num_issued + _value\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n \n@@ -31,12 +31,12 @@\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.num_issued = uint256_sub(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.num_issued = self.num_issued - _value\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n- return true\n+ return True\n \n @public\n @constant\n@@ -52,23 +52,23 @@\n def transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n- return true\n+ return True\n \n @public\n def transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n- self.balances[_from] = uint256_sub(self.balances[_from], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n- self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n+ self.balances[_from] = self.balances[_from] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n+ self.allowances[_from][_sender] = allowance - _value\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n- return true\n+ return True\n \n @public\n def approve(_spender : address, _value : uint256) -> bool:\n@@ -76,7 +76,7 @@\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n- return true\n+ return True\n \n @public\n @constant\n", "issue": "Solidity Compatible ERC20 broken\nThe [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then. 
\n", "before_files": [{"content": "# Solidity-Compatible EIP20/ERC20 Token\n# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Author: Phil Daian\n\n# The use of the uint256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# Events issued by the contract\nTransfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})\nApproval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})\n\nbalances: uint256[address]\nallowances: (uint256[address])[address]\nnum_issued: uint256\n\n@public\n@payable\ndef deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n self.num_issued = uint256_add(self.num_issued, _value)\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n\n@public\ndef withdraw(_value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.num_issued = uint256_sub(self.num_issued, _value)\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n return true\n\n@public\n@constant\ndef totalSupply() -> uint256:\n return self.num_issued\n\n@public\n@constant\ndef balanceOf(_owner : address) -> uint256:\n return self.balances[_owner]\n\n@public\ndef transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n return true\n\n@public\ndef transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n self.balances[_from] = uint256_sub(self.balances[_from], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n return true\n\n@public\ndef approve(_spender : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n return true\n\n@public\n@constant\ndef allowance(_owner : address, _spender : address) -> uint256:\n return self.allowances[_owner][_spender]\n\n", "path": "examples/tokens/ERC20_solidity_compatible/ERC20.v.py"}], "after_files": [{"content": "# Solidity-Compatible EIP20/ERC20 Token\n# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Author: Phil Daian\n\n# The use of the uint256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# Events 
issued by the contract\nTransfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})\nApproval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})\n\nbalances: uint256[address]\nallowances: (uint256[address])[address]\nnum_issued: uint256\n\n@public\n@payable\ndef deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n self.balances[_sender] = self.balances[_sender] + _value\n self.num_issued = self.num_issued + _value\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n\n@public\ndef withdraw(_value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n self.balances[_sender] = self.balances[_sender] - _value\n self.num_issued = self.num_issued - _value\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n return True\n\n@public\n@constant\ndef totalSupply() -> uint256:\n return self.num_issued\n\n@public\n@constant\ndef balanceOf(_owner : address) -> uint256:\n return self.balances[_owner]\n\n@public\ndef transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n self.balances[_sender] = self.balances[_sender] - _value\n self.balances[_to] = self.balances[_to] + _value\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n return True\n\n@public\ndef transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n self.balances[_from] = self.balances[_from] - _value\n self.balances[_to] = self.balances[_to] + _value\n self.allowances[_from][_sender] = allowance - _value\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n return True\n\n@public\ndef approve(_spender : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n return True\n\n@public\n@constant\ndef allowance(_owner : address, _spender : address) -> uint256:\n return self.allowances[_owner][_spender]\n\n", "path": "examples/tokens/ERC20_solidity_compatible/ERC20.v.py"}]} | 1,475 | 989 |
gh_patches_debug_29690 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1455 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MA: committee scraper for 2017
State: MA
The scraper says that it is skipping every page. I believe the site was rewritten, so it will need a complete rewrite.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ma/committees.py`
Content:
```
1 from billy.scrape.committees import CommitteeScraper, Committee
2
3 import lxml.html
4
5
6 class MACommitteeScraper(CommitteeScraper):
7 jurisdiction = 'ma'
8
9 def scrape(self, term, chambers):
10 page_types = []
11 if 'upper' in chambers:
12 page_types += ['Senate', 'Joint']
13 if 'lower' in chambers:
14 page_types += ['House']
15 chamber_mapping = {'Senate': 'upper',
16 'House': 'lower',
17 'Joint': 'joint'}
18
19 foundComms = []
20
21 for page_type in page_types:
22 url = 'http://www.malegislature.gov/Committees/' + page_type
23
24 html = self.get(url, verify=False).text
25 doc = lxml.html.fromstring(html)
26 doc.make_links_absolute('http://www.malegislature.gov')
27
28 for com_url in doc.xpath('//ul[@class="committeeList"]/li/a/@href'):
29 chamber = chamber_mapping[page_type]
30 self.scrape_committee(chamber, com_url)
31
32 def scrape_committee(self, chamber, url):
33 html = self.get(url, verify=False).text
34 doc = lxml.html.fromstring(html)
35
36 name = doc.xpath('//span[@class="committeeShortName"]/text()')
37 if len(name) == 0:
38 self.warning("Had to skip this malformed page.")
39 return
40 # Because of http://www.malegislature.gov/Committees/Senate/S29 this
41 # XXX: hack had to be pushed in. Remove me ASAP. This just skips
42 # malformed pages.
43
44 name = name[0]
45 com = Committee(chamber, name)
46 com.add_source(url)
47
48 # get both titles and names, order is consistent
49 titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
50 names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
51
52 for title, name in zip(titles, names):
53 com.add_member(name, title)
54
55 for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
56 com.add_member(member)
57
58 if com['members']:
59 self.save_committee(com)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py
--- a/openstates/ma/committees.py
+++ b/openstates/ma/committees.py
@@ -16,8 +16,6 @@
'House': 'lower',
'Joint': 'joint'}
- foundComms = []
-
for page_type in page_types:
url = 'http://www.malegislature.gov/Committees/' + page_type
@@ -33,27 +31,15 @@
html = self.get(url, verify=False).text
doc = lxml.html.fromstring(html)
- name = doc.xpath('//span[@class="committeeShortName"]/text()')
- if len(name) == 0:
- self.warning("Had to skip this malformed page.")
- return
- # Because of http://www.malegislature.gov/Committees/Senate/S29 this
- # XXX: hack had to be pushed in. Remove me ASAP. This just skips
- # malformed pages.
-
- name = name[0]
+ name = doc.xpath('//title/text()')[0]
com = Committee(chamber, name)
com.add_source(url)
- # get both titles and names, order is consistent
- titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
- names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
-
- for title, name in zip(titles, names):
- com.add_member(name, title)
-
- for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
- com.add_member(member)
+ members = doc.xpath('//a[contains(@href, "/Legislators/Profile")]')
+ for member in members:
+ title = member.xpath('../span')
+ role = title[0].text.lower() if title else 'member'
+ com.add_member(member.text, role)
if com['members']:
self.save_committee(com)
| {"golden_diff": "diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py\n--- a/openstates/ma/committees.py\n+++ b/openstates/ma/committees.py\n@@ -16,8 +16,6 @@\n 'House': 'lower',\n 'Joint': 'joint'}\n \n- foundComms = []\n-\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n \n@@ -33,27 +31,15 @@\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n \n- name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n- if len(name) == 0:\n- self.warning(\"Had to skip this malformed page.\")\n- return\n- # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n- # XXX: hack had to be pushed in. Remove me ASAP. This just skips\n- # malformed pages.\n-\n- name = name[0]\n+ name = doc.xpath('//title/text()')[0]\n com = Committee(chamber, name)\n com.add_source(url)\n \n- # get both titles and names, order is consistent\n- titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n- names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n-\n- for title, name in zip(titles, names):\n- com.add_member(name, title)\n-\n- for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n- com.add_member(member)\n+ members = doc.xpath('//a[contains(@href, \"/Legislators/Profile\")]')\n+ for member in members:\n+ title = member.xpath('../span')\n+ role = title[0].text.lower() if title else 'member'\n+ com.add_member(member.text, role)\n \n if com['members']:\n self.save_committee(com)\n", "issue": "MA: committee scraper for 2017\nState: MA\r\n\r\nsays that it is skipping every page, I believe the site was rewritten and so will need a complete rewrite\n", "before_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass MACommitteeScraper(CommitteeScraper):\n jurisdiction = 'ma'\n\n def scrape(self, term, chambers):\n page_types = []\n if 'upper' in chambers:\n page_types += ['Senate', 'Joint']\n if 'lower' in chambers:\n page_types += ['House']\n chamber_mapping = {'Senate': 'upper',\n 'House': 'lower',\n 'Joint': 'joint'}\n\n foundComms = []\n\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute('http://www.malegislature.gov')\n\n for com_url in doc.xpath('//ul[@class=\"committeeList\"]/li/a/@href'):\n chamber = chamber_mapping[page_type]\n self.scrape_committee(chamber, com_url)\n\n def scrape_committee(self, chamber, url):\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n\n name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n if len(name) == 0:\n self.warning(\"Had to skip this malformed page.\")\n return\n # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n # XXX: hack had to be pushed in. Remove me ASAP. 
This just skips\n # malformed pages.\n\n name = name[0]\n com = Committee(chamber, name)\n com.add_source(url)\n\n # get both titles and names, order is consistent\n titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n\n for title, name in zip(titles, names):\n com.add_member(name, title)\n\n for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n com.add_member(member)\n\n if com['members']:\n self.save_committee(com)\n", "path": "openstates/ma/committees.py"}], "after_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass MACommitteeScraper(CommitteeScraper):\n jurisdiction = 'ma'\n\n def scrape(self, term, chambers):\n page_types = []\n if 'upper' in chambers:\n page_types += ['Senate', 'Joint']\n if 'lower' in chambers:\n page_types += ['House']\n chamber_mapping = {'Senate': 'upper',\n 'House': 'lower',\n 'Joint': 'joint'}\n\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute('http://www.malegislature.gov')\n\n for com_url in doc.xpath('//ul[@class=\"committeeList\"]/li/a/@href'):\n chamber = chamber_mapping[page_type]\n self.scrape_committee(chamber, com_url)\n\n def scrape_committee(self, chamber, url):\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n\n name = doc.xpath('//title/text()')[0]\n com = Committee(chamber, name)\n com.add_source(url)\n\n members = doc.xpath('//a[contains(@href, \"/Legislators/Profile\")]')\n for member in members:\n title = member.xpath('../span')\n role = title[0].text.lower() if title else 'member'\n com.add_member(member.text, role)\n\n if com['members']:\n self.save_committee(com)\n", "path": "openstates/ma/committees.py"}]} | 893 | 456 |
gh_patches_debug_6239 | rasdani/github-patches | git_diff | searx__searx-1800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[flickr_noapi] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)
Similar to #419
Installation: current master commit
How to reproduce? Search for "kek" on https://search.snopyta.org/ and click on "Images"
```
ERROR:flask.app:Exception on / [POST]
Traceback (most recent call last):
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/searx/searx/webapp.py", line 544, in index
result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)
File "/usr/local/searx/searx/utils.py", line 79, in highlight_content
if content.lower().find(query.lower()) > -1:
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/flickr_noapi.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Flickr (Images)
5
6 @website https://www.flickr.com
7 @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
8
9 @using-api no
10 @results HTML
11 @stable no
12 @parse url, title, thumbnail, img_src
13 """
14
15 from json import loads
16 from time import time
17 import re
18 from searx.engines import logger
19 from searx.url_utils import urlencode
20 from searx.utils import ecma_unescape, html_to_text
21
22 logger = logger.getChild('flickr-noapi')
23
24 categories = ['images']
25
26 url = 'https://www.flickr.com/'
27 search_url = url + 'search?{query}&page={page}'
28 time_range_url = '&min_upload_date={start}&max_upload_date={end}'
29 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
30 modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)
31 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
32
33 paging = True
34 time_range_support = True
35 time_range_dict = {'day': 60 * 60 * 24,
36 'week': 60 * 60 * 24 * 7,
37 'month': 60 * 60 * 24 * 7 * 4,
38 'year': 60 * 60 * 24 * 7 * 52}
39
40
41 def build_flickr_url(user_id, photo_id):
42 return photo_url.format(userid=user_id, photoid=photo_id)
43
44
45 def _get_time_range_url(time_range):
46 if time_range in time_range_dict:
47 return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))
48 return ''
49
50
51 def request(query, params):
52 params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])
53 + _get_time_range_url(params['time_range']))
54 return params
55
56
57 def response(resp):
58 results = []
59
60 matches = modelexport_re.search(resp.text)
61
62 if matches is None:
63 return results
64
65 match = matches.group(1)
66 model_export = loads(match)
67
68 if 'legend' not in model_export:
69 return results
70
71 legend = model_export['legend']
72
73 # handle empty page
74 if not legend or not legend[0]:
75 return results
76
77 for index in legend:
78 photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]
79 author = ecma_unescape(photo.get('realname', ''))
80 source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
81 title = ecma_unescape(photo.get('title', ''))
82 content = html_to_text(ecma_unescape(photo.get('description', '')))
83 img_src = None
84 # From the biggest to the lowest format
85 for image_size in image_sizes:
86 if image_size in photo['sizes']:
87 img_src = photo['sizes'][image_size]['url']
88 img_format = 'jpg ' \
89 + str(photo['sizes'][image_size]['width']) \
90 + 'x' \
91 + str(photo['sizes'][image_size]['height'])
92 break
93
94 if not img_src:
95 logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
96 continue
97
98 # For a bigger thumbnail, keep only the url_z, not the url_n
99 if 'n' in photo['sizes']:
100 thumbnail_src = photo['sizes']['n']['url']
101 elif 'z' in photo['sizes']:
102 thumbnail_src = photo['sizes']['z']['url']
103 else:
104 thumbnail_src = img_src
105
106 if 'ownerNsid' not in photo:
107 # should not happen, disowned photo? Show it anyway
108 url = img_src
109 else:
110 url = build_flickr_url(photo['ownerNsid'], photo['id'])
111
112 result = {
113 'url': url,
114 'img_src': img_src,
115 'thumbnail_src': thumbnail_src,
116 'source': source,
117 'img_format': img_format,
118 'template': 'images.html'
119 }
120 try:
121 result['author'] = author.encode('utf-8')
122 result['title'] = title.encode('utf-8')
123 result['content'] = content.encode('utf-8')
124 except:
125 result['author'] = ''
126 result['title'] = ''
127 result['content'] = ''
128 results.append(result)
129
130 return results
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -118,9 +118,9 @@
'template': 'images.html'
}
try:
- result['author'] = author.encode('utf-8')
- result['title'] = title.encode('utf-8')
- result['content'] = content.encode('utf-8')
+ result['author'] = author
+ result['title'] = title
+ result['content'] = content
except:
result['author'] = ''
result['title'] = ''
| {"golden_diff": "diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py\n--- a/searx/engines/flickr_noapi.py\n+++ b/searx/engines/flickr_noapi.py\n@@ -118,9 +118,9 @@\n 'template': 'images.html'\n }\n try:\n- result['author'] = author.encode('utf-8')\n- result['title'] = title.encode('utf-8')\n- result['content'] = content.encode('utf-8')\n+ result['author'] = author\n+ result['title'] = title\n+ result['content'] = content\n except:\n result['author'] = ''\n result['title'] = ''\n", "issue": "[flickr_noapi] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)\nSimilar to #419\r\n\r\nInstallation: current master commit\r\nHow to reproduce? Search for \"kek\" on https://search.snopyta.org/ and click on \"Images\"\r\n\r\n```\r\nERROR:flask.app:Exception on / [POST]\r\nTraceback (most recent call last):\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1718, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/local/searx/searx/webapp.py\", line 544, in index\r\n result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)\r\n File \"/usr/local/searx/searx/utils.py\", line 79, in highlight_content\r\n if content.lower().find(query.lower()) > -1:\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom json import loads\nfrom time import time\nimport re\nfrom searx.engines import logger\nfrom searx.url_utils import urlencode\nfrom searx.utils import ecma_unescape, html_to_text\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\ntime_range_url = '&min_upload_date={start}&max_upload_date={end}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nmodelexport_re = re.compile(r\"^\\s*modelExport:\\s*({.*}),$\", re.M)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\ntime_range_support = True\ntime_range_dict = {'day': 60 * 60 * 24,\n 'week': 60 * 60 * 24 * 7,\n 'month': 60 * 60 * 24 * 7 * 4,\n 'year': 60 * 60 * 24 * 7 * 52}\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef _get_time_range_url(time_range):\n if time_range in time_range_dict:\n return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))\n return ''\n\n\ndef request(query, params):\n params['url'] = 
(search_url.format(query=urlencode({'text': query}), page=params['pageno'])\n + _get_time_range_url(params['time_range']))\n return params\n\n\ndef response(resp):\n results = []\n\n matches = modelexport_re.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n model_export = loads(match)\n\n if 'legend' not in model_export:\n return results\n\n legend = model_export['legend']\n\n # handle empty page\n if not legend or not legend[0]:\n return results\n\n for index in legend:\n photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]\n author = ecma_unescape(photo.get('realname', ''))\n source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'\n title = ecma_unescape(photo.get('title', ''))\n content = html_to_text(ecma_unescape(photo.get('description', '')))\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n img_format = 'jpg ' \\\n + str(photo['sizes'][image_size]['width']) \\\n + 'x' \\\n + str(photo['sizes'][image_size]['height'])\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n if 'ownerNsid' not in photo:\n # should not happen, disowned photo? Show it anyway\n url = img_src\n else:\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n result = {\n 'url': url,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'source': source,\n 'img_format': img_format,\n 'template': 'images.html'\n }\n try:\n result['author'] = author.encode('utf-8')\n result['title'] = title.encode('utf-8')\n result['content'] = content.encode('utf-8')\n except:\n result['author'] = ''\n result['title'] = ''\n result['content'] = ''\n results.append(result)\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom json import loads\nfrom time import time\nimport re\nfrom searx.engines import logger\nfrom searx.url_utils import urlencode\nfrom searx.utils import ecma_unescape, html_to_text\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\ntime_range_url = '&min_upload_date={start}&max_upload_date={end}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nmodelexport_re = re.compile(r\"^\\s*modelExport:\\s*({.*}),$\", re.M)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\ntime_range_support = True\ntime_range_dict = {'day': 60 * 60 * 24,\n 'week': 60 * 60 * 24 * 7,\n 'month': 60 * 60 * 24 * 7 * 4,\n 'year': 60 * 60 * 24 * 7 * 52}\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef _get_time_range_url(time_range):\n if time_range in time_range_dict:\n return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))\n return ''\n\n\ndef 
request(query, params):\n params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])\n + _get_time_range_url(params['time_range']))\n return params\n\n\ndef response(resp):\n results = []\n\n matches = modelexport_re.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n model_export = loads(match)\n\n if 'legend' not in model_export:\n return results\n\n legend = model_export['legend']\n\n # handle empty page\n if not legend or not legend[0]:\n return results\n\n for index in legend:\n photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]\n author = ecma_unescape(photo.get('realname', ''))\n source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'\n title = ecma_unescape(photo.get('title', ''))\n content = html_to_text(ecma_unescape(photo.get('description', '')))\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n img_format = 'jpg ' \\\n + str(photo['sizes'][image_size]['width']) \\\n + 'x' \\\n + str(photo['sizes'][image_size]['height'])\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n if 'ownerNsid' not in photo:\n # should not happen, disowned photo? Show it anyway\n url = img_src\n else:\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n result = {\n 'url': url,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'source': source,\n 'img_format': img_format,\n 'template': 'images.html'\n }\n try:\n result['author'] = author\n result['title'] = title\n result['content'] = content\n except:\n result['author'] = ''\n result['title'] = ''\n result['content'] = ''\n results.append(result)\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}]} | 2,039 | 169 |
gh_patches_debug_23348 | rasdani/github-patches | git_diff | wagtail__wagtail-8210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tags over 100 characters
Found a bug? Please fill out the sections below. 👍
### Issue Summary
When adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column, no validation error is given.
### Steps to Reproduce
1. login to admin and edit a page with a tag content panel
2. create a tag with more than 100 characters
3. save, or publish the page
### Technical details
* Python version: Python 3.5.1
* Django version: 1.11.13
* Wagtail version: 1.13.1
--- END ISSUE ---
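Before looking at the files, here is a minimal sketch of the length check the issue implies is missing. It mirrors the approach ultimately taken in the patch further down (reading `max_length` off the tag model's `name` field); the helper name and its standalone form are illustrative assumptions, not code from the repository:

```python
from django.core.exceptions import ValidationError


def validate_tag_lengths(tag_names, tag_model):
    # Hypothetical helper: reject any tag whose name exceeds the tag model's
    # name column length (100 characters for django-taggit's default Tag).
    max_tag_length = tag_model.name.field.max_length
    too_long = [name for name in tag_names if len(name) > max_tag_length]
    if too_long:
        raise ValidationError(
            "Tag(s) %s are over %d characters" % (", ".join(too_long), max_tag_length)
        )
```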
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/forms/tags.py`
Content:
```
1 from taggit.forms import TagField as TaggitTagField
2 from taggit.models import Tag
3
4 from wagtail.admin.widgets import AdminTagWidget
5
6
7 class TagField(TaggitTagField):
8 """
9 Extends taggit's TagField with the option to prevent creating tags that do not already exist
10 """
11
12 widget = AdminTagWidget
13
14 def __init__(self, *args, **kwargs):
15 self.tag_model = kwargs.pop("tag_model", None)
16 self.free_tagging = kwargs.pop("free_tagging", None)
17
18 super().__init__(*args, **kwargs)
19
20 # pass on tag_model and free_tagging kwargs to the widget,
21 # if (and only if) they have been passed explicitly here.
22 # Otherwise, set default values for clean() to use
23 if self.tag_model is None:
24 self.tag_model = Tag
25 else:
26 self.widget.tag_model = self.tag_model
27
28 if self.free_tagging is None:
29 self.free_tagging = getattr(self.tag_model, "free_tagging", True)
30 else:
31 self.widget.free_tagging = self.free_tagging
32
33 def clean(self, value):
34 value = super().clean(value)
35
36 if not self.free_tagging:
37 # filter value to just the tags that already exist in tag_model
38 value = list(
39 self.tag_model.objects.filter(name__in=value).values_list(
40 "name", flat=True
41 )
42 )
43
44 return value
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py
--- a/wagtail/admin/forms/tags.py
+++ b/wagtail/admin/forms/tags.py
@@ -1,3 +1,5 @@
+from django.core.exceptions import ValidationError
+from django.utils.translation import gettext_lazy as _
from taggit.forms import TagField as TaggitTagField
from taggit.models import Tag
@@ -31,8 +33,27 @@
self.widget.free_tagging = self.free_tagging
def clean(self, value):
+
value = super().clean(value)
+ max_tag_length = self.tag_model.name.field.max_length
+ value_too_long = ""
+ for val in value:
+ if len(val) > max_tag_length:
+ if value_too_long:
+ value_too_long += ", "
+ value_too_long += val
+ if value_too_long:
+ raise ValidationError(
+ _(
+ "Tag(s) %(value_too_long)s are over %(max_tag_length)d characters"
+ % {
+ "value_too_long": value_too_long,
+ "max_tag_length": max_tag_length,
+ }
+ )
+ )
+
if not self.free_tagging:
# filter value to just the tags that already exist in tag_model
value = list(
| {"golden_diff": "diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py\n--- a/wagtail/admin/forms/tags.py\n+++ b/wagtail/admin/forms/tags.py\n@@ -1,3 +1,5 @@\n+from django.core.exceptions import ValidationError\n+from django.utils.translation import gettext_lazy as _\n from taggit.forms import TagField as TaggitTagField\n from taggit.models import Tag\n \n@@ -31,8 +33,27 @@\n self.widget.free_tagging = self.free_tagging\n \n def clean(self, value):\n+\n value = super().clean(value)\n \n+ max_tag_length = self.tag_model.name.field.max_length\n+ value_too_long = \"\"\n+ for val in value:\n+ if len(val) > max_tag_length:\n+ if value_too_long:\n+ value_too_long += \", \"\n+ value_too_long += val\n+ if value_too_long:\n+ raise ValidationError(\n+ _(\n+ \"Tag(s) %(value_too_long)s are over %(max_tag_length)d characters\"\n+ % {\n+ \"value_too_long\": value_too_long,\n+ \"max_tag_length\": max_tag_length,\n+ }\n+ )\n+ )\n+\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n", "issue": "Tags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\nTags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. 
save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\n", "before_files": [{"content": "from taggit.forms import TagField as TaggitTagField\nfrom taggit.models import Tag\n\nfrom wagtail.admin.widgets import AdminTagWidget\n\n\nclass TagField(TaggitTagField):\n \"\"\"\n Extends taggit's TagField with the option to prevent creating tags that do not already exist\n \"\"\"\n\n widget = AdminTagWidget\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", None)\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n\n super().__init__(*args, **kwargs)\n\n # pass on tag_model and free_tagging kwargs to the widget,\n # if (and only if) they have been passed explicitly here.\n # Otherwise, set default values for clean() to use\n if self.tag_model is None:\n self.tag_model = Tag\n else:\n self.widget.tag_model = self.tag_model\n\n if self.free_tagging is None:\n self.free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n self.widget.free_tagging = self.free_tagging\n\n def clean(self, value):\n value = super().clean(value)\n\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n self.tag_model.objects.filter(name__in=value).values_list(\n \"name\", flat=True\n )\n )\n\n return value\n", "path": "wagtail/admin/forms/tags.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.forms import TagField as TaggitTagField\nfrom taggit.models import Tag\n\nfrom wagtail.admin.widgets import AdminTagWidget\n\n\nclass TagField(TaggitTagField):\n \"\"\"\n Extends taggit's TagField with the option to prevent creating tags that do not already exist\n \"\"\"\n\n widget = AdminTagWidget\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", None)\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n\n super().__init__(*args, **kwargs)\n\n # pass on tag_model and free_tagging kwargs to the widget,\n # if (and only if) they have been passed explicitly here.\n # Otherwise, set default values for clean() to use\n if self.tag_model is None:\n self.tag_model = Tag\n else:\n self.widget.tag_model = self.tag_model\n\n if self.free_tagging is None:\n self.free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n self.widget.free_tagging = self.free_tagging\n\n def clean(self, value):\n\n value = super().clean(value)\n\n max_tag_length = self.tag_model.name.field.max_length\n value_too_long = \"\"\n for val in value:\n if len(val) > max_tag_length:\n if value_too_long:\n value_too_long += \", \"\n value_too_long += val\n if value_too_long:\n raise ValidationError(\n _(\n \"Tag(s) %(value_too_long)s are over %(max_tag_length)d characters\"\n % {\n \"value_too_long\": value_too_long,\n \"max_tag_length\": max_tag_length,\n }\n )\n )\n\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n self.tag_model.objects.filter(name__in=value).values_list(\n \"name\", flat=True\n )\n )\n\n return value\n", "path": "wagtail/admin/forms/tags.py"}]} | 949 | 300 |
gh_patches_debug_66972 | rasdani/github-patches | git_diff | pandas-dev__pandas-19628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DateTimeIndex.__iter__().next() rounds time to microseconds, when timezone aware
#### Code Sample
```python
>> import pandas as pd
>> datetimeindex = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358"])
>> datetimeindex
DatetimeIndex(['2018-02-08 15:00:00.168456358'], dtype='datetime64[ns]', freq=None)
>> datetimeindex = datetimeindex.tz_localize(datetime.timezone.utc)
>> datetimeindex
DatetimeIndex(['2018-02-08 15:00:00.168456358+00:00'], dtype='datetime64[ns, UTC+00:00]', freq=None)
>> datetimeindex.__getitem__(0)
Timestamp('2018-02-08 15:00:00.168456358+0000', tz='UTC+00:00')
>> datetimeindex.__iter__().__next__()
Timestamp('2018-02-08 15:00:00.168456+0000', tz='UTC+00:00')
```
#### Problem description
When using a timezone-localized DatetimeIndex with nanosecond precision, __getitem__ behaviour differs from __iter__().__next__() behaviour: when iterating through the DatetimeIndex, the timestamp is rounded to microseconds. This does not happen if the DatetimeIndex has no timezone.
#### Output of ``pd.show_versions()``
<details>
INSTALLED VERSIONS
------------------
commit: None
python: 3.4.2.final.0
python-bits: 64
OS: Linux
OS-release: 4.9.0-0.bpo.2-amd64
machine: x86_64
processor:
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
LOCALE: en_US.UTF-8
pandas: 0.22.0
pytest: None
pip: 9.0.1
setuptools: 36.5.0
Cython: None
numpy: 1.14.0
scipy: 1.0.0
pyarrow: None
xarray: None
IPython: 6.2.1
sphinx: None
patsy: None
dateutil: 2.6.1
pytz: 2017.3
blosc: None
bottleneck: None
tables: None
numexpr: None
feather: None
matplotlib: 2.0.2
openpyxl: None
xlrd: None
xlwt: None
xlsxwriter: None
lxml: None
bs4: None
html5lib: None
sqlalchemy: None
pymysql: None
psycopg2: None
jinja2: None
s3fs: None
fastparquet: None
pandas_gbq: None
pandas_datareader: None
</details>
--- END ISSUE ---
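A regression-test sketch for the behaviour described above (assuming pandas is installed and Python 3's datetime.timezone is available; this is illustrative, not a test taken from the pandas test suite):

```python
import datetime

import pandas as pd


def test_iteration_preserves_nanoseconds():
    # Item access and iteration should yield identical tz-aware Timestamps,
    # without the nanosecond component being rounded away to microseconds.
    dti = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358"]).tz_localize(
        datetime.timezone.utc
    )
    assert dti[0] == next(iter(dti))
    assert next(iter(dti)).nanosecond == 358
```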
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/conftest.py`
Content:
```
1 import pytest
2
3 from distutils.version import LooseVersion
4 import numpy
5 import pandas
6 import dateutil
7 import pandas.util._test_decorators as td
8
9
10 def pytest_addoption(parser):
11 parser.addoption("--skip-slow", action="store_true",
12 help="skip slow tests")
13 parser.addoption("--skip-network", action="store_true",
14 help="skip network tests")
15 parser.addoption("--run-high-memory", action="store_true",
16 help="run high memory tests")
17 parser.addoption("--only-slow", action="store_true",
18 help="run only slow tests")
19
20
21 def pytest_runtest_setup(item):
22 if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
23 pytest.skip("skipping due to --skip-slow")
24
25 if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
26 pytest.skip("skipping due to --only-slow")
27
28 if 'network' in item.keywords and item.config.getoption("--skip-network"):
29 pytest.skip("skipping due to --skip-network")
30
31 if 'high_memory' in item.keywords and not item.config.getoption(
32 "--run-high-memory"):
33 pytest.skip(
34 "skipping high memory test since --run-high-memory was not set")
35
36
37 # Configurations for all tests and all test modules
38
39 @pytest.fixture(autouse=True)
40 def configure_tests():
41 pandas.set_option('chained_assignment', 'raise')
42
43
44 # For running doctests: make np and pd names available
45
46 @pytest.fixture(autouse=True)
47 def add_imports(doctest_namespace):
48 doctest_namespace['np'] = numpy
49 doctest_namespace['pd'] = pandas
50
51
52 @pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
53 def spmatrix(request):
54 from scipy import sparse
55 return getattr(sparse, request.param + '_matrix')
56
57
58 @pytest.fixture
59 def ip():
60 """
61 Get an instance of IPython.InteractiveShell.
62
63 Will raise a skip if IPython is not installed.
64 """
65
66 pytest.importorskip('IPython', minversion="6.0.0")
67 from IPython.core.interactiveshell import InteractiveShell
68 return InteractiveShell()
69
70
71 is_dateutil_le_261 = pytest.mark.skipif(
72 LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),
73 reason="dateutil api change version")
74 is_dateutil_gt_261 = pytest.mark.skipif(
75 LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),
76 reason="dateutil stable version")
77
78
79 @pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
80 pytest.param('xz', marks=td.skip_if_no_lzma)])
81 def compression(request):
82 """
83 Fixture for trying common compression types in compression tests
84 """
85 return request.param
86
87
88 @pytest.fixture(params=[None, 'gzip', 'bz2',
89 pytest.param('xz', marks=td.skip_if_no_lzma)])
90 def compression_no_zip(request):
91 """
92 Fixture for trying common compression types in compression tests
93 except zip
94 """
95 return request.param
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas/conftest.py b/pandas/conftest.py
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -93,3 +93,9 @@
except zip
"""
return request.param
+
+
[email protected](scope='module')
+def datetime_tz_utc():
+ from datetime import timezone
+ return timezone.utc
| {"golden_diff": "diff --git a/pandas/conftest.py b/pandas/conftest.py\n--- a/pandas/conftest.py\n+++ b/pandas/conftest.py\n@@ -93,3 +93,9 @@\n except zip\n \"\"\"\n return request.param\n+\n+\[email protected](scope='module')\n+def datetime_tz_utc():\n+ from datetime import timezone\n+ return timezone.utc\n", "issue": "DateTimeIndex.__iter__().next() rounds time to microseconds, when timezone aware\n#### Code Sample\r\n\r\n```python\r\n>> import pandas as pd\r\n>> datetimeindex = pd.DatetimeIndex([\"2018-02-08 15:00:00.168456358\"])\r\n>> datetimeindex\r\nDatetimeIndex(['2018-02-08 15:00:00.168456358'], dtype='datetime64[ns]', freq=None)\r\n>> datetimeindex = datetimeindex.tz_localize(datetime.timezone.utc)\r\n>> datetimeindex\r\nDatetimeIndex(['2018-02-08 15:00:00.168456358+00:00'], dtype='datetime64[ns, UTC+00:00]', freq=None)\r\n>> datetimeindex.__getitem__(0)\r\nTimestamp('2018-02-08 15:00:00.168456358+0000', tz='UTC+00:00')\r\n>> datetimeindex.__iter__().__next__()\r\nTimestamp('2018-02-08 15:00:00.168456+0000', tz='UTC+00:00')\r\n```\r\n#### Problem description\r\n\r\nWhen using localize DateTimeIndex with nanosecond precision, __getitem__ behavious differs from __iter__().__next__ behaviour, as when iterating thought the DateTimeIndex the date is round to microseconds. This doen not happends if the DatetimeIndex has no timezone.\r\n\r\n#### Output of ``pd.show_versions()``\r\n\r\n<details>\r\n\r\nINSTALLED VERSIONS\r\n------------------\r\ncommit: None\r\npython: 3.4.2.final.0\r\npython-bits: 64\r\nOS: Linux\r\nOS-release: 4.9.0-0.bpo.2-amd64\r\nmachine: x86_64\r\nprocessor: \r\nbyteorder: little\r\nLC_ALL: None\r\nLANG: en_US.UTF-8\r\nLOCALE: en_US.UTF-8\r\n\r\npandas: 0.22.0\r\npytest: None\r\npip: 9.0.1\r\nsetuptools: 36.5.0\r\nCython: None\r\nnumpy: 1.14.0\r\nscipy: 1.0.0\r\npyarrow: None\r\nxarray: None\r\nIPython: 6.2.1\r\nsphinx: None\r\npatsy: None\r\ndateutil: 2.6.1\r\npytz: 2017.3\r\nblosc: None\r\nbottleneck: None\r\ntables: None\r\nnumexpr: None\r\nfeather: None\r\nmatplotlib: 2.0.2\r\nopenpyxl: None\r\nxlrd: None\r\nxlwt: None\r\nxlsxwriter: None\r\nlxml: None\r\nbs4: None\r\nhtml5lib: None\r\nsqlalchemy: None\r\npymysql: None\r\npsycopg2: None\r\njinja2: None\r\ns3fs: None\r\nfastparquet: None\r\npandas_gbq: None\r\npandas_datareader: None\r\n\r\n</details>\r\n\n", "before_files": [{"content": "import pytest\n\nfrom distutils.version import LooseVersion\nimport numpy\nimport pandas\nimport dateutil\nimport pandas.util._test_decorators as td\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\",\n help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\",\n help=\"skip network tests\")\n parser.addoption(\"--run-high-memory\", action=\"store_true\",\n help=\"run high memory tests\")\n parser.addoption(\"--only-slow\", action=\"store_true\",\n help=\"run only slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if 'slow' in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if 'slow' not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if 'network' in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if 'high_memory' in item.keywords and not item.config.getoption(\n \"--run-high-memory\"):\n pytest.skip(\n \"skipping high memory test since --run-high-memory was not set\")\n\n\n# Configurations for all tests and all test 
modules\n\[email protected](autouse=True)\ndef configure_tests():\n pandas.set_option('chained_assignment', 'raise')\n\n\n# For running doctests: make np and pd names available\n\[email protected](autouse=True)\ndef add_imports(doctest_namespace):\n doctest_namespace['np'] = numpy\n doctest_namespace['pd'] = pandas\n\n\[email protected](params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])\ndef spmatrix(request):\n from scipy import sparse\n return getattr(sparse, request.param + '_matrix')\n\n\[email protected]\ndef ip():\n \"\"\"\n Get an instance of IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n from IPython.core.interactiveshell import InteractiveShell\n return InteractiveShell()\n\n\nis_dateutil_le_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),\n reason=\"dateutil api change version\")\nis_dateutil_gt_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),\n reason=\"dateutil stable version\")\n\n\[email protected](params=[None, 'gzip', 'bz2', 'zip',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n \"\"\"\n return request.param\n\n\[email protected](params=[None, 'gzip', 'bz2',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression_no_zip(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n except zip\n \"\"\"\n return request.param\n", "path": "pandas/conftest.py"}], "after_files": [{"content": "import pytest\n\nfrom distutils.version import LooseVersion\nimport numpy\nimport pandas\nimport dateutil\nimport pandas.util._test_decorators as td\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\",\n help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\",\n help=\"skip network tests\")\n parser.addoption(\"--run-high-memory\", action=\"store_true\",\n help=\"run high memory tests\")\n parser.addoption(\"--only-slow\", action=\"store_true\",\n help=\"run only slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if 'slow' in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if 'slow' not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if 'network' in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if 'high_memory' in item.keywords and not item.config.getoption(\n \"--run-high-memory\"):\n pytest.skip(\n \"skipping high memory test since --run-high-memory was not set\")\n\n\n# Configurations for all tests and all test modules\n\[email protected](autouse=True)\ndef configure_tests():\n pandas.set_option('chained_assignment', 'raise')\n\n\n# For running doctests: make np and pd names available\n\[email protected](autouse=True)\ndef add_imports(doctest_namespace):\n doctest_namespace['np'] = numpy\n doctest_namespace['pd'] = pandas\n\n\[email protected](params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])\ndef spmatrix(request):\n from scipy import sparse\n return getattr(sparse, request.param + '_matrix')\n\n\[email protected]\ndef ip():\n \"\"\"\n Get an instance of IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n 
from IPython.core.interactiveshell import InteractiveShell\n return InteractiveShell()\n\n\nis_dateutil_le_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),\n reason=\"dateutil api change version\")\nis_dateutil_gt_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),\n reason=\"dateutil stable version\")\n\n\[email protected](params=[None, 'gzip', 'bz2', 'zip',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n \"\"\"\n return request.param\n\n\[email protected](params=[None, 'gzip', 'bz2',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression_no_zip(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n except zip\n \"\"\"\n return request.param\n\n\[email protected](scope='module')\ndef datetime_tz_utc():\n from datetime import timezone\n return timezone.utc\n", "path": "pandas/conftest.py"}]} | 1,847 | 89 |
gh_patches_debug_18828 | rasdani/github-patches | git_diff | platformsh__platformsh-docs-2079 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add keywords for search
### Where on docs.platform.sh should be changed?
/configuration/app/app-reference.html
### What exactly should be updated?
We'd like specific pages to be findable by searching for specific words. For example, the app reference should come up when searching for `.platform.app.yaml` (this may also involve a problem with escaping characters like `.`). Add keywords or other metadata to make these pages findable.
### Additional context
_No response_
--- END ISSUE ---
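For context, a sketch of how an indexed `keywords` field would be exercised once the search settings include it (the connection details and document shape below are assumptions for illustration, not the project's actual scrape output):

```python
import meilisearch

client = meilisearch.Client("http://127.0.0.1:7700")
index = client.get_index("docs")

# A scraped page could carry explicit keywords so exact terms such as
# ".platform.app.yaml" resolve to the intended reference page.
index.add_documents([{
    "documentId": "app-reference",
    "title": "App reference",
    "pageUrl": "/configuration/app/app-reference.html",
    "keywords": [".platform.app.yaml", "app.yaml"],
    "text": "Complete reference of the application configuration.",
}])

print(index.search(".platform.app.yaml"))
```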
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `search/main.py`
Content:
```
1 import os
2 import glob
3 import json
4 import meilisearch
5 from platformshconfig import Config
6
7 class Search:
8 def __init__(self):
9 self.default = {
10 "host": "http://127.0.0.1",
11 "key": None,
12 "port": 7700
13 }
14
15 self.scrape_dir = "output"
16 self.scrape_config = "config/scrape.json"
17 self.docs_index = "docs"
18 self.primaryKey = "documentId"
19 self.index_name = "Docs"
20
21 # Below are Platform.sh custom settings for how the search engine functions.
22
23 # Data available to the dropdown React app in docs, used to fill out autocomplete results.
24 self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']
25 # Data actually searchable by our queries.
26 self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']
27
28 # Show results for one query with the listed pages, when they by default would not show up as best results.
29 # Note: these aren't automatically two-way, which is why they're all defined twice.
30 self.synonyms = {
31 "cron": ["crons"],
32 "crons": ["cron tasks", "cron jobs"],
33 "e-mail": ["email"],
34 "routes.yaml": ["routes"],
35 "routes": ["routes.yaml"],
36 "services": ["services.yaml"],
37 "services.yaml": ["services"],
38 "application": [".platform.app.yaml", "app.yaml", "applications.yaml"],
39 ".platform.app.yaml": ["application"],
40 "app.yaml": ["application"],
41 "applications.yaml": ["application", "multi-app"],
42 "multi-app": ["applications.yaml"],
43 "regions": ["public ip addresses"],
44 "public ip addresses": ["regions"],
45 "ssl": ["https", "tls"],
46 "https": ["ssl"],
47 }
48
49 # Ranking rules:
50 #
51 # - Default order: ["words", "typo", "proximity", "attribute", "sort", "exactness"]
52 #
53 # - words: number of times query is in document (greater number gets priority)
54 # - typo: fewer typos > more typos
55 # - proximity: smaller distance between multiple occurences of query in same document > larger distances
56 # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in
57 # more important attributes first.
58 # - sort: queries are sorted at query time
59 # - exactness: similarity of matched words in document with query
60
61 self.ranking_rules = ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"]
62
63 self.updated_settings = {
64 "rankingRules": self.ranking_rules,
65 "searchableAttributes": self.searchable_attributes,
66 "displayedAttributes": self.displayed_attributes
67 }
68
69 # Group results by page
70 self.distinct_attribute = "pageUrl"
71
72 def getConnectionString(self):
73 """
74 Sets the Meilisearch host string, depending on the environment.
75
76 Returns:
77 string: Meilisearch host string.
78 """
79 if os.environ.get('PORT'):
80 return "{}:{}".format(self.default["host"], os.environ['PORT'])
81 else:
82 return "{}:{}".format(self.default["host"], self.default["port"])
83
84 def getMasterKey(self):
85 """
86 Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.
87 """
88 config = Config()
89 if config.is_valid_platform():
90 return config.projectEntropy
91 elif os.environ.get("MEILI_MASTER_KEY"):
92 return os.environ["MEILI_MASTER_KEY"]
93 else:
94 return self.default["key"]
95
96 def add_documents(self, index):
97 """
98 Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.
99 """
100 documents = [f for f in glob.glob("{}/*.json".format(self.scrape_dir))]
101 for doc in documents:
102 self.add(doc, index)
103
104 def add(self, doc, index):
105 """
106 Add an individual site's index to the Meilisearch service.
107 """
108 with open(doc) as scraped_index:
109 data = json.load(scraped_index)
110 index.add_documents(data)
111
112 def update(self):
113 """
114 Updates the Meilisearch index.
115 """
116 # Create a Meilisearch client.
117 client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())
118
119 # Delete previous index
120 if len(client.get_indexes()):
121 client.get_index(self.docs_index).delete()
122
123 # Create a new index
124 index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
125
126 # Add synonyms for the index
127 index.update_synonyms(self.synonyms)
128
129 # Update its settings: what can be searched, what's displayable, and how results should be ranked.
130 index.update_settings(self.updated_settings)
131
132 # Update distinct attribute.
133 index.update_distinct_attribute(self.distinct_attribute)
134
135 # Add documents to the index
136 self.add_documents(index)
137
138 if __name__ == "__main__":
139 meili = Search()
140 meili.update()
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/search/main.py b/search/main.py
--- a/search/main.py
+++ b/search/main.py
@@ -21,9 +21,9 @@
# Below are Platform.sh custom settings for how the search engine functions.
# Data available to the dropdown React app in docs, used to fill out autocomplete results.
- self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']
+ self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']
# Data actually searchable by our queries.
- self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']
+ self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']
# Show results for one query with the listed pages, when they by default would not show up as best results.
# Note: these aren't automatically two-way, which is why they're all defined twice.
| {"golden_diff": "diff --git a/search/main.py b/search/main.py\n--- a/search/main.py\n+++ b/search/main.py\n@@ -21,9 +21,9 @@\n # Below are Platform.sh custom settings for how the search engine functions.\n \n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n- self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']\n+ self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n- self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']\n+ self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n \n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n", "issue": "Add keywords for search\n### Where on docs.platform.sh should be changed?\n\n/configuration/app/app-reference.html\n\n### What exactly should be updated?\n\nWe'd like specific pages to be findable by searching for specific words. For example, the app reference when searching for `.platform.app.yaml` (this may also involve a problem with escaping characters like `.`). Add keywords or other to make these pages findable.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). 
terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}], "after_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # 
Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n 
index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}]} | 1,808 | 221 |
gh_patches_debug_16950 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strange behaviour with the to_dense_adj function
### 🐛 Describe the bug
While using to_dense_adj with edge attributes, I observed that the `idx` values generated are not unique (line 94 in to_dense_adj.py). As a result, the scatter_add function sums up the overlapping values, generating output entries larger than the original range of edge_attr values.

The required tensors can be downloaded from [here](https://filesender.switch.ch/filesender2/download.php?token=d4b1599a-6eee-4b06-8640-be16fb784ab5&files_ids=490595)
Any help or insights are highly appreciated.
Thanks,
Chinmay
### Environment
* PyG version:2.3.1
* PyTorch version: 2.0.1+cu117
* OS: Ubuntu 20.04
* Python version:3.8.10
* CUDA/cuDNN version:11.7
* How you installed PyTorch and PyG (`conda`, `pip`, source):pip
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
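A minimal, self-contained illustration of the summing behaviour (assuming torch and torch_geometric are installed; the tensors are made up for demonstration and are not the ones attached to the issue):

```python
import torch
from torch_geometric.utils import to_dense_adj

# The edge (0 -> 1) appears twice, so both entries map to the same idx and
# the scatter sum adds their attributes together in the dense output.
edge_index = torch.tensor([[0, 0], [1, 1]])
edge_attr = torch.tensor([1.0, 2.0])
adj = to_dense_adj(edge_index, edge_attr=edge_attr)
print(adj)  # adj[0, 0, 1] == 3.0, larger than any single edge_attr value
```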
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/utils/to_dense_adj.py`
Content:
```
1 from typing import Optional
2
3 import torch
4 from torch import Tensor
5
6 from torch_geometric.typing import OptTensor
7 from torch_geometric.utils import cumsum, scatter
8
9
10 def to_dense_adj(
11 edge_index: Tensor,
12 batch: OptTensor = None,
13 edge_attr: OptTensor = None,
14 max_num_nodes: Optional[int] = None,
15 batch_size: Optional[int] = None,
16 ) -> Tensor:
17 r"""Converts batched sparse adjacency matrices given by edge indices and
18 edge attributes to a single dense batched adjacency matrix.
19
20 Args:
21 edge_index (LongTensor): The edge indices.
22 batch (LongTensor, optional): Batch vector
23 :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
24 node to a specific example. (default: :obj:`None`)
25 edge_attr (Tensor, optional): Edge weights or multi-dimensional edge
26 features. (default: :obj:`None`)
27 max_num_nodes (int, optional): The size of the output node dimension.
28 (default: :obj:`None`)
29 batch_size (int, optional) The batch size. (default: :obj:`None`)
30
31 :rtype: :class:`Tensor`
32
33 Examples:
34
35 >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],
36 ... [0, 1, 0, 3, 0]])
37 >>> batch = torch.tensor([0, 0, 1, 1])
38 >>> to_dense_adj(edge_index, batch)
39 tensor([[[1., 1.],
40 [1., 0.]],
41 [[0., 1.],
42 [1., 0.]]])
43
44 >>> to_dense_adj(edge_index, batch, max_num_nodes=4)
45 tensor([[[1., 1., 0., 0.],
46 [1., 0., 0., 0.],
47 [0., 0., 0., 0.],
48 [0., 0., 0., 0.]],
49 [[0., 1., 0., 0.],
50 [1., 0., 0., 0.],
51 [0., 0., 0., 0.],
52 [0., 0., 0., 0.]]])
53
54 >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
55 >>> to_dense_adj(edge_index, batch, edge_attr)
56 tensor([[[1., 2.],
57 [3., 0.]],
58 [[0., 4.],
59 [5., 0.]]])
60 """
61 if batch is None:
62 num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
63 batch = edge_index.new_zeros(num_nodes)
64
65 if batch_size is None:
66 batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1
67
68 one = batch.new_ones(batch.size(0))
69 num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')
70 cum_nodes = cumsum(num_nodes)
71
72 idx0 = batch[edge_index[0]]
73 idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]
74 idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]
75
76 if max_num_nodes is None:
77 max_num_nodes = int(num_nodes.max())
78
79 elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)
80 or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):
81 mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)
82 idx0 = idx0[mask]
83 idx1 = idx1[mask]
84 idx2 = idx2[mask]
85 edge_attr = None if edge_attr is None else edge_attr[mask]
86
87 if edge_attr is None:
88 edge_attr = torch.ones(idx0.numel(), device=edge_index.device)
89
90 size = [batch_size, max_num_nodes, max_num_nodes]
91 size += list(edge_attr.size())[1:]
92 flattened_size = batch_size * max_num_nodes * max_num_nodes
93
94 idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2
95 adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')
96 adj = adj.view(size)
97
98 return adj
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py
--- a/torch_geometric/utils/to_dense_adj.py
+++ b/torch_geometric/utils/to_dense_adj.py
@@ -23,7 +23,10 @@
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
edge_attr (Tensor, optional): Edge weights or multi-dimensional edge
- features. (default: :obj:`None`)
+ features.
+ If :obj:`edge_index` contains duplicated edges, the dense adjacency
+ matrix output holds the summed up entries of :obj:`edge_attr` for
+ duplicated edges. (default: :obj:`None`)
max_num_nodes (int, optional): The size of the output node dimension.
(default: :obj:`None`)
batch_size (int, optional) The batch size. (default: :obj:`None`)
| {"golden_diff": "diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py\n--- a/torch_geometric/utils/to_dense_adj.py\n+++ b/torch_geometric/utils/to_dense_adj.py\n@@ -23,7 +23,10 @@\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n- features. (default: :obj:`None`)\n+ features.\n+ If :obj:`edge_index` contains duplicated edges, the dense adjacency\n+ matrix output holds the summed up entries of :obj:`edge_attr` for\n+ duplicated edges. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n", "issue": "Strange behaviour with the to_dense_adj function\n### \ud83d\udc1b Describe the bug\n\nWhile using to_dense_adj with edge attributes, I observed that the `idx` values generated are not unique ((line 94 in to_dense_adj.py). As such, the scatter_add function sums up overlapping values and generating an output greater than the original range of edge_attr values.\r\n\r\n\r\n\r\nThe required tensors can be downloaded from [here](https://filesender.switch.ch/filesender2/download.php?token=d4b1599a-6eee-4b06-8640-be16fb784ab5&files_ids=490595)\r\n\r\nAny help or insights are highly appreciated. \r\n\r\nThanks,\r\nChinmay\n\n### Environment\n\n* PyG version:2.3.1\r\n* PyTorch version: 2.0.1+cu117\r\n* OS: Ubuntu 20.04\r\n* Python version:3.8.10\r\n* CUDA/cuDNN version:11.7\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):pip\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import OptTensor\nfrom torch_geometric.utils import cumsum, scatter\n\n\ndef to_dense_adj(\n edge_index: Tensor,\n batch: OptTensor = None,\n edge_attr: OptTensor = None,\n max_num_nodes: Optional[int] = None,\n batch_size: Optional[int] = None,\n) -> Tensor:\n r\"\"\"Converts batched sparse adjacency matrices given by edge indices and\n edge attributes to a single dense batched adjacency matrix.\n\n Args:\n edge_index (LongTensor): The edge indices.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n features. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n\n :rtype: :class:`Tensor`\n\n Examples:\n\n >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],\n ... 
[0, 1, 0, 3, 0]])\n >>> batch = torch.tensor([0, 0, 1, 1])\n >>> to_dense_adj(edge_index, batch)\n tensor([[[1., 1.],\n [1., 0.]],\n [[0., 1.],\n [1., 0.]]])\n\n >>> to_dense_adj(edge_index, batch, max_num_nodes=4)\n tensor([[[1., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n [[0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]]])\n\n >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n >>> to_dense_adj(edge_index, batch, edge_attr)\n tensor([[[1., 2.],\n [3., 0.]],\n [[0., 4.],\n [5., 0.]]])\n \"\"\"\n if batch is None:\n num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0\n batch = edge_index.new_zeros(num_nodes)\n\n if batch_size is None:\n batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1\n\n one = batch.new_ones(batch.size(0))\n num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')\n cum_nodes = cumsum(num_nodes)\n\n idx0 = batch[edge_index[0]]\n idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]\n idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]\n\n if max_num_nodes is None:\n max_num_nodes = int(num_nodes.max())\n\n elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)\n or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):\n mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)\n idx0 = idx0[mask]\n idx1 = idx1[mask]\n idx2 = idx2[mask]\n edge_attr = None if edge_attr is None else edge_attr[mask]\n\n if edge_attr is None:\n edge_attr = torch.ones(idx0.numel(), device=edge_index.device)\n\n size = [batch_size, max_num_nodes, max_num_nodes]\n size += list(edge_attr.size())[1:]\n flattened_size = batch_size * max_num_nodes * max_num_nodes\n\n idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2\n adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')\n adj = adj.view(size)\n\n return adj\n", "path": "torch_geometric/utils/to_dense_adj.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import OptTensor\nfrom torch_geometric.utils import cumsum, scatter\n\n\ndef to_dense_adj(\n edge_index: Tensor,\n batch: OptTensor = None,\n edge_attr: OptTensor = None,\n max_num_nodes: Optional[int] = None,\n batch_size: Optional[int] = None,\n) -> Tensor:\n r\"\"\"Converts batched sparse adjacency matrices given by edge indices and\n edge attributes to a single dense batched adjacency matrix.\n\n Args:\n edge_index (LongTensor): The edge indices.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n features.\n If :obj:`edge_index` contains duplicated edges, the dense adjacency\n matrix output holds the summed up entries of :obj:`edge_attr` for\n duplicated edges. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n\n :rtype: :class:`Tensor`\n\n Examples:\n\n >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],\n ... 
[0, 1, 0, 3, 0]])\n >>> batch = torch.tensor([0, 0, 1, 1])\n >>> to_dense_adj(edge_index, batch)\n tensor([[[1., 1.],\n [1., 0.]],\n [[0., 1.],\n [1., 0.]]])\n\n >>> to_dense_adj(edge_index, batch, max_num_nodes=4)\n tensor([[[1., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n [[0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]]])\n\n >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n >>> to_dense_adj(edge_index, batch, edge_attr)\n tensor([[[1., 2.],\n [3., 0.]],\n [[0., 4.],\n [5., 0.]]])\n \"\"\"\n if batch is None:\n num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0\n batch = edge_index.new_zeros(num_nodes)\n\n if batch_size is None:\n batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1\n\n one = batch.new_ones(batch.size(0))\n num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')\n cum_nodes = cumsum(num_nodes)\n\n idx0 = batch[edge_index[0]]\n idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]\n idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]\n\n if max_num_nodes is None:\n max_num_nodes = int(num_nodes.max())\n\n elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)\n or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):\n mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)\n idx0 = idx0[mask]\n idx1 = idx1[mask]\n idx2 = idx2[mask]\n edge_attr = None if edge_attr is None else edge_attr[mask]\n\n if edge_attr is None:\n edge_attr = torch.ones(idx0.numel(), device=edge_index.device)\n\n size = [batch_size, max_num_nodes, max_num_nodes]\n size += list(edge_attr.size())[1:]\n flattened_size = batch_size * max_num_nodes * max_num_nodes\n\n idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2\n adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')\n adj = adj.view(size)\n\n return adj\n", "path": "torch_geometric/utils/to_dense_adj.py"}]} | 1,779 | 234 |
gh_patches_debug_1417 | rasdani/github-patches | git_diff | getmoto__moto-1400 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mock_xray_client cannot be used as a context manager
PR #1255 added support for `aws_xray_sdk` which is great.
But there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.
As a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.
Also, `mock_xray_client` does not return the result of the function it decorates. Given that it is meant to be used to decorate test functions, this is most likely not a big issue, but I think it is still worth fixing.
I will prepare a PR for the return value issue soon.
Also, I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`), but I am not yet familiar enough with `moto` internals to be sure which would be the best way to implement it.
Installed version: `moto-ext==1.1.25`
The problem seemingly persists in current `master` branch.
--- END ISSUE ---
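A short sketch of the return-value problem mentioned above (assuming moto with X-Ray support and aws_xray_sdk are installed; the decorated function is made up for illustration):

```python
from moto import mock_xray_client


@mock_xray_client
def compute_answer():
    return 42


# The wrapper calls the decorated function without returning its result,
# so this prints None instead of 42 until the missing return is added.
print(compute_answer())
```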
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/xray/mock_client.py`
Content:
```
1 from functools import wraps
2 import os
3 from moto.xray import xray_backends
4 import aws_xray_sdk.core
5 from aws_xray_sdk.core.context import Context as AWSContext
6 from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter
7
8
9 class MockEmitter(UDPEmitter):
10 """
11 Replaces the code that sends UDP to local X-Ray daemon
12 """
13 def __init__(self, daemon_address='127.0.0.1:2000'):
14 address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)
15 self._ip, self._port = self._parse_address(address)
16
17 def _xray_backend(self, region):
18 return xray_backends[region]
19
20 def send_entity(self, entity):
21 # Hack to get region
22 # region = entity.subsegments[0].aws['region']
23 # xray = self._xray_backend(region)
24
25 # TODO store X-Ray data, pretty sure X-Ray needs refactor for this
26 pass
27
28 def _send_data(self, data):
29 raise RuntimeError('Should not be running this')
30
31
32 def mock_xray_client(f):
33 """
34 Mocks the X-Ray sdk by pwning its evil singleton with our methods
35
36 The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.
37 This means the Context() will be very unhappy if an env var isnt present, so we set that, save
38 the old context, then supply our new context.
39 We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing
40 that itno the recorder instance.
41 """
42 @wraps(f)
43 def _wrapped(*args, **kwargs):
44 print("Starting X-Ray Patch")
45
46 old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')
47 os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'
48 old_xray_context = aws_xray_sdk.core.xray_recorder._context
49 old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter
50 aws_xray_sdk.core.xray_recorder._context = AWSContext()
51 aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
52
53 try:
54 f(*args, **kwargs)
55 finally:
56
57 if old_xray_context_var is None:
58 del os.environ['AWS_XRAY_CONTEXT_MISSING']
59 else:
60 os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var
61
62 aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter
63 aws_xray_sdk.core.xray_recorder._context = old_xray_context
64
65 return _wrapped
66
67
68 class XRaySegment(object):
69 """
70 XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark
71 the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated
72 by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop
73 the segment, thus causing it to be emitted via UDP.
74
75 During testing we're going to have to control the start and end of a segment via context managers.
76 """
77 def __enter__(self):
78 aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)
79
80 return self
81
82 def __exit__(self, exc_type, exc_val, exc_tb):
83 aws_xray_sdk.core.xray_recorder.end_segment()
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py
--- a/moto/xray/mock_client.py
+++ b/moto/xray/mock_client.py
@@ -51,7 +51,7 @@
aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
try:
- f(*args, **kwargs)
+ return f(*args, **kwargs)
finally:
if old_xray_context_var is None:
| {"golden_diff": "diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py\n--- a/moto/xray/mock_client.py\n+++ b/moto/xray/mock_client.py\n@@ -51,7 +51,7 @@\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n \n try:\n- f(*args, **kwargs)\n+ return f(*args, **kwargs)\n finally:\n \n if old_xray_context_var is None:\n", "issue": "mock_xray_client cannot be used as a context manager\nPR #1255 added support for `aws_xray_sdk` which is great.\r\nBut there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.\r\nAs a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.\r\n\r\nAlso, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions it is most likely not a big issue, but I think it is still worth fixing.\r\n\r\nI will prepare a PR for the return value issue soon.\r\nAlso I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`) but am not yet enough familiar with `moto` internals to be sure which would be the best way to implement it.\r\n\r\nInstalled version: `moto-ext==1.1.25`\r\nThe problem seemingly persists in current `master` branch.\n", "before_files": [{"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n 
aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n", "path": "moto/xray/mock_client.py"}], "after_files": [{"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n return f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. 
Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n", "path": "moto/xray/mock_client.py"}]} | 1,463 | 106 |
gh_patches_debug_42167 | rasdani/github-patches | git_diff | sunpy__sunpy-3970 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sunpy.data.manager does not allow a local path object in manager.override_file()
<!-- These comments are hidden when you submit the issue, so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
It would be great if `sunpy.data.manager` could take a local file path.
--- END ISSUE ---
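For illustration only, here is a sketch of the current workaround versus the behaviour this report asks for. `test_function` stands in for a function that would normally carry a `@manager.require(...)` decoration, and both the registered name and the paths are assumptions.
```python
# Hedged sketch: today a local file has to be passed as a file:// URI; the
# request is for a plain filesystem path (or path object) to work as well.
from sunpy.data import manager


def test_function():
    # stands in for a function decorated with @manager.require("test_file", ...)
    pass


# accepted by the current implementation (explicit file:// scheme)
with manager.override_file("test_file", uri="file:///tmp/local_copy.fits"):
    test_function()

# behaviour requested in this issue (plain local path, no scheme)
with manager.override_file("test_file", uri="/tmp/local_copy.fits"):
    test_function()
```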
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/data/data_manager/manager.py`
Content:
```
1 from typing import Dict
2 import pathlib
3 import functools
4 from contextlib import contextmanager
5 import warnings
6
7 from sunpy.util.util import hash_file
8 from sunpy.util.exceptions import SunpyUserWarning
9
10 __all__ = ['DataManager']
11
12
13 class DataManager:
14 """
15 This class provides a remote data manager for managing remote files.
16
17 Parameters
18 ----------
19 cache: `sunpy.data.data_manager.cache.Cache`
20 Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.
21 """
22
23 def __init__(self, cache):
24 self._cache = cache
25
26 self._file_cache = {}
27
28 self._skip_hash_check = False
29 self._skip_file: Dict[str, str] = {}
30
31 def require(self, name, urls, sha_hash):
32 """
33 Decorator for informing the data manager about the requirement of
34 a file by a function.
35
36 Parameters
37 ----------
38 name: `str`
39 The name to reference the file with.
40 urls: `list` or `str`
41 A list of urls to download the file from.
42 sha_hash: `str`
43 SHA-1 hash of file.
44 """
45 if isinstance(urls, str):
46 urls = [urls]
47
48 def decorator(func):
49 @functools.wraps(func)
50 def wrapper(*args, **kwargs):
51 replace = self._skip_file.get(name, None)
52 if replace:
53 if replace['uri'].startswith('file://'):
54 file_path = replace['uri'][len('file://'):]
55 file_hash = hash_file(file_path)
56 else:
57 file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
58 if replace['hash'] and file_hash != replace['hash']:
59 # if hash provided to replace function doesn't match the hash of the file
60 # raise error
61 raise ValueError(
62 "Hash provided to override_file does not match hash of the file.")
63 elif self._skip_hash_check:
64 file_path = self._cache.download(urls, redownload=True)
65 else:
66 details = self._cache.get_by_hash(sha_hash)
67 if not details:
68 # In case we are matching by hash and file does not exist
69 # That might mean the wrong hash is supplied to decorator
70 # We match by urls to make sure that is not the case
71 if self._cache_has_file(urls):
72 raise ValueError(" Hash provided does not match the hash in database.")
73 file_path = self._cache.download(urls)
74 if hash_file(file_path) != sha_hash:
75 # the hash of the file downloaded does not match provided hash
76 # this means the file has changed on the server.
77 # the function should be updated to use the new hash. Raise an error to notify.
78 raise RuntimeError(
79 "Remote file on the server has changed. Update hash of the function.")
80 else:
81 # This is to handle the case when the local file appears to be tampered/corrupted
82 if hash_file(details['file_path']) != details['file_hash']:
83 warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
84 SunpyUserWarning)
85 file_path = self._cache.download(urls, redownload=True)
86 # Recheck the hash again, if this fails, we will exit.
87 if hash_file(file_path) != details['file_hash']:
88 raise RuntimeError("Redownloaded file also has the incorrect hash."
89 "The remote file on the server might have changed.")
90 else:
91 file_path = details['file_path']
92
93 self._file_cache[name] = file_path
94 return func(*args, **kwargs)
95 return wrapper
96
97 return decorator
98
99 @contextmanager
100 def override_file(self, name, uri, sha_hash=None):
101 """
102 Replaces the file by the name with the file provided by the url/path.
103
104 Parameters
105 ----------
106 name: `str`
107 Name of the file provided in the `require` decorator.
108 uri: `str`
109 URI of the file which replaces original file. Scheme should be
110 one of ``http``, ``https``, ``ftp`` or ``file``.
111 sha_hash: `str`, optional
112 SHA256 hash of the file to compared to after downloading.
113 """
114 try:
115 self._skip_file[name] = {
116 'uri': uri,
117 'hash': sha_hash,
118 }
119 yield
120 finally:
121 _ = self._skip_file.pop(name, None)
122
123 @contextmanager
124 def skip_hash_check(self):
125 """
126 Disables hash checking temporarily
127
128 Examples
129 --------
130 >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP
131 ... myfunction() # doctest: +SKIP
132 """
133 try:
134 self._skip_hash_check = True
135 yield
136 finally:
137 self._skip_hash_check = False
138
139 def get(self, name):
140 """
141 Get the file by name.
142
143 Parameters
144 ----------
145 name: `str`
146 Name of the file given to the data manager, same as the one provided
147 in `~sunpy.data.data_manager.manager.DataManager.require`.
148
149 Returns
150 -------
151 `pathlib.Path`
152 Path of the file.
153
154 Raises
155 ------
156 `KeyError`
157 If ``name`` is not in the cache.
158 """
159 return pathlib.Path(self._file_cache[name])
160
161 def _cache_has_file(self, urls):
162 for url in urls:
163 if self._cache._get_by_url(url):
164 return True
165 return False
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/data/data_manager/manager.py b/sunpy/data/data_manager/manager.py
--- a/sunpy/data/data_manager/manager.py
+++ b/sunpy/data/data_manager/manager.py
@@ -1,11 +1,12 @@
-from typing import Dict
import pathlib
+import warnings
import functools
+from typing import Dict
from contextlib import contextmanager
-import warnings
+from urllib.parse import urlparse
-from sunpy.util.util import hash_file
from sunpy.util.exceptions import SunpyUserWarning
+from sunpy.util.util import hash_file
__all__ = ['DataManager']
@@ -50,8 +51,14 @@
def wrapper(*args, **kwargs):
replace = self._skip_file.get(name, None)
if replace:
- if replace['uri'].startswith('file://'):
- file_path = replace['uri'][len('file://'):]
+ uri_parse = urlparse(replace['uri'])
+ if uri_parse.scheme in ("", "file"):
+ # If a relative file uri is specified (i.e.
+ # `file://sunpy/test`) this maintains compatibility
+ # with the original behaviour where this would be
+ # interpreted as `./sunpy/test` if no scheme is
+ # specified netloc will be '' by default.
+ file_path = uri_parse.netloc + uri_parse.path
file_hash = hash_file(file_path)
else:
file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
@@ -74,11 +81,13 @@
if hash_file(file_path) != sha_hash:
# the hash of the file downloaded does not match provided hash
# this means the file has changed on the server.
- # the function should be updated to use the new hash. Raise an error to notify.
+ # the function should be updated to use the new
+ # hash. Raise an error to notify.
raise RuntimeError(
"Remote file on the server has changed. Update hash of the function.")
else:
- # This is to handle the case when the local file appears to be tampered/corrupted
+ # This is to handle the case when the local file
+ # appears to be tampered/corrupted
if hash_file(details['file_path']) != details['file_hash']:
warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
SunpyUserWarning)
@@ -106,8 +115,10 @@
name: `str`
Name of the file provided in the `require` decorator.
uri: `str`
- URI of the file which replaces original file. Scheme should be
- one of ``http``, ``https``, ``ftp`` or ``file``.
+ URI of the file which replaces original file. Scheme should be one
+ of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given
+ the uri will be interpreted as a local path. i.e.
+ ``file:///tmp/test`` and ``/tmp/test`` are the same.
sha_hash: `str`, optional
SHA256 hash of the file to compared to after downloading.
"""
| {"golden_diff": "diff --git a/sunpy/data/data_manager/manager.py b/sunpy/data/data_manager/manager.py\n--- a/sunpy/data/data_manager/manager.py\n+++ b/sunpy/data/data_manager/manager.py\n@@ -1,11 +1,12 @@\n-from typing import Dict\n import pathlib\n+import warnings\n import functools\n+from typing import Dict\n from contextlib import contextmanager\n-import warnings\n+from urllib.parse import urlparse\n \n-from sunpy.util.util import hash_file\n from sunpy.util.exceptions import SunpyUserWarning\n+from sunpy.util.util import hash_file\n \n __all__ = ['DataManager']\n \n@@ -50,8 +51,14 @@\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n- if replace['uri'].startswith('file://'):\n- file_path = replace['uri'][len('file://'):]\n+ uri_parse = urlparse(replace['uri'])\n+ if uri_parse.scheme in (\"\", \"file\"):\n+ # If a relative file uri is specified (i.e.\n+ # `file://sunpy/test`) this maintains compatibility\n+ # with the original behaviour where this would be\n+ # interpreted as `./sunpy/test` if no scheme is\n+ # specified netloc will be '' by default.\n+ file_path = uri_parse.netloc + uri_parse.path\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n@@ -74,11 +81,13 @@\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n- # the function should be updated to use the new hash. Raise an error to notify.\n+ # the function should be updated to use the new\n+ # hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. Update hash of the function.\")\n else:\n- # This is to handle the case when the local file appears to be tampered/corrupted\n+ # This is to handle the case when the local file\n+ # appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n@@ -106,8 +115,10 @@\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n- URI of the file which replaces original file. Scheme should be\n- one of ``http``, ``https``, ``ftp`` or ``file``.\n+ URI of the file which replaces original file. Scheme should be one\n+ of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given\n+ the uri will be interpreted as a local path. i.e.\n+ ``file:///tmp/test`` and ``/tmp/test`` are the same.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n", "issue": "sunpy.data.manager does not allow for local path object in manager. override_file()\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! 
-->\r\n\r\n### Description\r\nIt would be great if `sunpy.data.manager` could take a local file path\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\nimport pathlib\nimport functools\nfrom contextlib import contextmanager\nimport warnings\n\nfrom sunpy.util.util import hash_file\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['DataManager']\n\n\nclass DataManager:\n \"\"\"\n This class provides a remote data manager for managing remote files.\n\n Parameters\n ----------\n cache: `sunpy.data.data_manager.cache.Cache`\n Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.\n \"\"\"\n\n def __init__(self, cache):\n self._cache = cache\n\n self._file_cache = {}\n\n self._skip_hash_check = False\n self._skip_file: Dict[str, str] = {}\n\n def require(self, name, urls, sha_hash):\n \"\"\"\n Decorator for informing the data manager about the requirement of\n a file by a function.\n\n Parameters\n ----------\n name: `str`\n The name to reference the file with.\n urls: `list` or `str`\n A list of urls to download the file from.\n sha_hash: `str`\n SHA-1 hash of file.\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n if replace['uri'].startswith('file://'):\n file_path = replace['uri'][len('file://'):]\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n if replace['hash'] and file_hash != replace['hash']:\n # if hash provided to replace function doesn't match the hash of the file\n # raise error\n raise ValueError(\n \"Hash provided to override_file does not match hash of the file.\")\n elif self._skip_hash_check:\n file_path = self._cache.download(urls, redownload=True)\n else:\n details = self._cache.get_by_hash(sha_hash)\n if not details:\n # In case we are matching by hash and file does not exist\n # That might mean the wrong hash is supplied to decorator\n # We match by urls to make sure that is not the case\n if self._cache_has_file(urls):\n raise ValueError(\" Hash provided does not match the hash in database.\")\n file_path = self._cache.download(urls)\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n # the function should be updated to use the new hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. 
Update hash of the function.\")\n else:\n # This is to handle the case when the local file appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n file_path = self._cache.download(urls, redownload=True)\n # Recheck the hash again, if this fails, we will exit.\n if hash_file(file_path) != details['file_hash']:\n raise RuntimeError(\"Redownloaded file also has the incorrect hash.\"\n \"The remote file on the server might have changed.\")\n else:\n file_path = details['file_path']\n\n self._file_cache[name] = file_path\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n @contextmanager\n def override_file(self, name, uri, sha_hash=None):\n \"\"\"\n Replaces the file by the name with the file provided by the url/path.\n\n Parameters\n ----------\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n URI of the file which replaces original file. Scheme should be\n one of ``http``, ``https``, ``ftp`` or ``file``.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n try:\n self._skip_file[name] = {\n 'uri': uri,\n 'hash': sha_hash,\n }\n yield\n finally:\n _ = self._skip_file.pop(name, None)\n\n @contextmanager\n def skip_hash_check(self):\n \"\"\"\n Disables hash checking temporarily\n\n Examples\n --------\n >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP\n ... myfunction() # doctest: +SKIP\n \"\"\"\n try:\n self._skip_hash_check = True\n yield\n finally:\n self._skip_hash_check = False\n\n def get(self, name):\n \"\"\"\n Get the file by name.\n\n Parameters\n ----------\n name: `str`\n Name of the file given to the data manager, same as the one provided\n in `~sunpy.data.data_manager.manager.DataManager.require`.\n\n Returns\n -------\n `pathlib.Path`\n Path of the file.\n\n Raises\n ------\n `KeyError`\n If ``name`` is not in the cache.\n \"\"\"\n return pathlib.Path(self._file_cache[name])\n\n def _cache_has_file(self, urls):\n for url in urls:\n if self._cache._get_by_url(url):\n return True\n return False\n", "path": "sunpy/data/data_manager/manager.py"}], "after_files": [{"content": "import pathlib\nimport warnings\nimport functools\nfrom typing import Dict\nfrom contextlib import contextmanager\nfrom urllib.parse import urlparse\n\nfrom sunpy.util.exceptions import SunpyUserWarning\nfrom sunpy.util.util import hash_file\n\n__all__ = ['DataManager']\n\n\nclass DataManager:\n \"\"\"\n This class provides a remote data manager for managing remote files.\n\n Parameters\n ----------\n cache: `sunpy.data.data_manager.cache.Cache`\n Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.\n \"\"\"\n\n def __init__(self, cache):\n self._cache = cache\n\n self._file_cache = {}\n\n self._skip_hash_check = False\n self._skip_file: Dict[str, str] = {}\n\n def require(self, name, urls, sha_hash):\n \"\"\"\n Decorator for informing the data manager about the requirement of\n a file by a function.\n\n Parameters\n ----------\n name: `str`\n The name to reference the file with.\n urls: `list` or `str`\n A list of urls to download the file from.\n sha_hash: `str`\n SHA-1 hash of file.\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n uri_parse = 
urlparse(replace['uri'])\n if uri_parse.scheme in (\"\", \"file\"):\n # If a relative file uri is specified (i.e.\n # `file://sunpy/test`) this maintains compatibility\n # with the original behaviour where this would be\n # interpreted as `./sunpy/test` if no scheme is\n # specified netloc will be '' by default.\n file_path = uri_parse.netloc + uri_parse.path\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n if replace['hash'] and file_hash != replace['hash']:\n # if hash provided to replace function doesn't match the hash of the file\n # raise error\n raise ValueError(\n \"Hash provided to override_file does not match hash of the file.\")\n elif self._skip_hash_check:\n file_path = self._cache.download(urls, redownload=True)\n else:\n details = self._cache.get_by_hash(sha_hash)\n if not details:\n # In case we are matching by hash and file does not exist\n # That might mean the wrong hash is supplied to decorator\n # We match by urls to make sure that is not the case\n if self._cache_has_file(urls):\n raise ValueError(\" Hash provided does not match the hash in database.\")\n file_path = self._cache.download(urls)\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n # the function should be updated to use the new\n # hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. Update hash of the function.\")\n else:\n # This is to handle the case when the local file\n # appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n file_path = self._cache.download(urls, redownload=True)\n # Recheck the hash again, if this fails, we will exit.\n if hash_file(file_path) != details['file_hash']:\n raise RuntimeError(\"Redownloaded file also has the incorrect hash.\"\n \"The remote file on the server might have changed.\")\n else:\n file_path = details['file_path']\n\n self._file_cache[name] = file_path\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n @contextmanager\n def override_file(self, name, uri, sha_hash=None):\n \"\"\"\n Replaces the file by the name with the file provided by the url/path.\n\n Parameters\n ----------\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n URI of the file which replaces original file. Scheme should be one\n of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given\n the uri will be interpreted as a local path. i.e.\n ``file:///tmp/test`` and ``/tmp/test`` are the same.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n try:\n self._skip_file[name] = {\n 'uri': uri,\n 'hash': sha_hash,\n }\n yield\n finally:\n _ = self._skip_file.pop(name, None)\n\n @contextmanager\n def skip_hash_check(self):\n \"\"\"\n Disables hash checking temporarily\n\n Examples\n --------\n >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP\n ... 
myfunction() # doctest: +SKIP\n \"\"\"\n try:\n self._skip_hash_check = True\n yield\n finally:\n self._skip_hash_check = False\n\n def get(self, name):\n \"\"\"\n Get the file by name.\n\n Parameters\n ----------\n name: `str`\n Name of the file given to the data manager, same as the one provided\n in `~sunpy.data.data_manager.manager.DataManager.require`.\n\n Returns\n -------\n `pathlib.Path`\n Path of the file.\n\n Raises\n ------\n `KeyError`\n If ``name`` is not in the cache.\n \"\"\"\n return pathlib.Path(self._file_cache[name])\n\n def _cache_has_file(self, urls):\n for url in urls:\n if self._cache._get_by_url(url):\n return True\n return False\n", "path": "sunpy/data/data_manager/manager.py"}]} | 2,028 | 724 |
gh_patches_debug_57933 | rasdani/github-patches | git_diff | scrapy__scrapy-3668 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
about the signal retry_complete
I didn't find the signal in the signal list; how can I use it?
--- END ISSUE ---
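Since no `retry_complete` signal is actually dispatched, one practical alternative is to read the `retry/*` stats when the spider closes. The sketch below is a hedged illustration; the extension class and its name are assumptions, not Scrapy or project API.
```python
# Hypothetical extension: logs the total retry count on spider close by
# reading the stats that RetryMiddleware already records.
from scrapy import signals


class RetryReport:
    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls(crawler.stats)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def spider_closed(self, spider):
        count = self.stats.get_value("retry/count", 0)
        spider.logger.info("Total retries performed: %s", count)
```
It would be enabled through the `EXTENSIONS` setting in the usual way.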
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/retry.py`
Content:
```
1 """
2 An extension to retry failed requests that are potentially caused by temporary
3 problems such as a connection timeout or HTTP 500 error.
4
5 You can change the behaviour of this middleware by modifing the scraping settings:
6 RETRY_TIMES - how many times to retry a failed page
7 RETRY_HTTP_CODES - which HTTP response codes to retry
8
9 Failed pages are collected on the scraping process and rescheduled at the end,
10 once the spider has finished crawling all regular (non failed) pages. Once
11 there is no more failed pages to retry this middleware sends a signal
12 (retry_complete), so other extensions could connect to that signal.
13 """
14 import logging
15
16 from twisted.internet import defer
17 from twisted.internet.error import TimeoutError, DNSLookupError, \
18 ConnectionRefusedError, ConnectionDone, ConnectError, \
19 ConnectionLost, TCPTimedOutError
20 from twisted.web.client import ResponseFailed
21
22 from scrapy.exceptions import NotConfigured
23 from scrapy.utils.response import response_status_message
24 from scrapy.core.downloader.handlers.http11 import TunnelError
25 from scrapy.utils.python import global_object_name
26
27 logger = logging.getLogger(__name__)
28
29
30 class RetryMiddleware(object):
31
32 # IOError is raised by the HttpCompression middleware when trying to
33 # decompress an empty response
34 EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
35 ConnectionRefusedError, ConnectionDone, ConnectError,
36 ConnectionLost, TCPTimedOutError, ResponseFailed,
37 IOError, TunnelError)
38
39 def __init__(self, settings):
40 if not settings.getbool('RETRY_ENABLED'):
41 raise NotConfigured
42 self.max_retry_times = settings.getint('RETRY_TIMES')
43 self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
44 self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
45
46 @classmethod
47 def from_crawler(cls, crawler):
48 return cls(crawler.settings)
49
50 def process_response(self, request, response, spider):
51 if request.meta.get('dont_retry', False):
52 return response
53 if response.status in self.retry_http_codes:
54 reason = response_status_message(response.status)
55 return self._retry(request, reason, spider) or response
56 return response
57
58 def process_exception(self, request, exception, spider):
59 if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
60 and not request.meta.get('dont_retry', False):
61 return self._retry(request, exception, spider)
62
63 def _retry(self, request, reason, spider):
64 retries = request.meta.get('retry_times', 0) + 1
65
66 retry_times = self.max_retry_times
67
68 if 'max_retry_times' in request.meta:
69 retry_times = request.meta['max_retry_times']
70
71 stats = spider.crawler.stats
72 if retries <= retry_times:
73 logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
74 {'request': request, 'retries': retries, 'reason': reason},
75 extra={'spider': spider})
76 retryreq = request.copy()
77 retryreq.meta['retry_times'] = retries
78 retryreq.dont_filter = True
79 retryreq.priority = request.priority + self.priority_adjust
80
81 if isinstance(reason, Exception):
82 reason = global_object_name(reason.__class__)
83
84 stats.inc_value('retry/count')
85 stats.inc_value('retry/reason_count/%s' % reason)
86 return retryreq
87 else:
88 stats.inc_value('retry/max_reached')
89 logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
90 {'request': request, 'retries': retries, 'reason': reason},
91 extra={'spider': spider})
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py
--- a/scrapy/downloadermiddlewares/retry.py
+++ b/scrapy/downloadermiddlewares/retry.py
@@ -7,9 +7,7 @@
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
-once the spider has finished crawling all regular (non failed) pages. Once
-there is no more failed pages to retry this middleware sends a signal
-(retry_complete), so other extensions could connect to that signal.
+once the spider has finished crawling all regular (non failed) pages.
"""
import logging
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py\n--- a/scrapy/downloadermiddlewares/retry.py\n+++ b/scrapy/downloadermiddlewares/retry.py\n@@ -7,9 +7,7 @@\n RETRY_HTTP_CODES - which HTTP response codes to retry\n \n Failed pages are collected on the scraping process and rescheduled at the end,\n-once the spider has finished crawling all regular (non failed) pages. Once\n-there is no more failed pages to retry this middleware sends a signal\n-(retry_complete), so other extensions could connect to that signal.\n+once the spider has finished crawling all regular (non failed) pages.\n \"\"\"\n import logging\n", "issue": "about the signal retry_complete\nI didn't find the singnal in the singnal list,how can I use it\n", "before_files": [{"content": "\"\"\"\nAn extension to retry failed requests that are potentially caused by temporary\nproblems such as a connection timeout or HTTP 500 error.\n\nYou can change the behaviour of this middleware by modifing the scraping settings:\nRETRY_TIMES - how many times to retry a failed page\nRETRY_HTTP_CODES - which HTTP response codes to retry\n\nFailed pages are collected on the scraping process and rescheduled at the end,\nonce the spider has finished crawling all regular (non failed) pages. Once\nthere is no more failed pages to retry this middleware sends a signal\n(retry_complete), so other extensions could connect to that signal.\n\"\"\"\nimport logging\n\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n ConnectionRefusedError, ConnectionDone, ConnectError, \\\n ConnectionLost, TCPTimedOutError\nfrom twisted.web.client import ResponseFailed\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.response import response_status_message\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\nfrom scrapy.utils.python import global_object_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryMiddleware(object):\n\n # IOError is raised by the HttpCompression middleware when trying to\n # decompress an empty response\n EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,\n ConnectionRefusedError, ConnectionDone, ConnectError,\n ConnectionLost, TCPTimedOutError, ResponseFailed,\n IOError, TunnelError)\n\n def __init__(self, settings):\n if not settings.getbool('RETRY_ENABLED'):\n raise NotConfigured\n self.max_retry_times = settings.getint('RETRY_TIMES')\n self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_retry', False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider) or response\n return response\n\n def process_exception(self, request, exception, spider):\n if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \\\n and not request.meta.get('dont_retry', False):\n return self._retry(request, exception, spider)\n\n def _retry(self, request, reason, spider):\n retries = request.meta.get('retry_times', 0) + 1\n\n retry_times = self.max_retry_times\n\n if 'max_retry_times' in request.meta:\n retry_times = request.meta['max_retry_times']\n\n stats = spider.crawler.stats\n if retries <= retry_times:\n logger.debug(\"Retrying 
%(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n retryreq = request.copy()\n retryreq.meta['retry_times'] = retries\n retryreq.dont_filter = True\n retryreq.priority = request.priority + self.priority_adjust\n\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value('retry/count')\n stats.inc_value('retry/reason_count/%s' % reason)\n return retryreq\n else:\n stats.inc_value('retry/max_reached')\n logger.debug(\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n", "path": "scrapy/downloadermiddlewares/retry.py"}], "after_files": [{"content": "\"\"\"\nAn extension to retry failed requests that are potentially caused by temporary\nproblems such as a connection timeout or HTTP 500 error.\n\nYou can change the behaviour of this middleware by modifing the scraping settings:\nRETRY_TIMES - how many times to retry a failed page\nRETRY_HTTP_CODES - which HTTP response codes to retry\n\nFailed pages are collected on the scraping process and rescheduled at the end,\nonce the spider has finished crawling all regular (non failed) pages.\n\"\"\"\nimport logging\n\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n ConnectionRefusedError, ConnectionDone, ConnectError, \\\n ConnectionLost, TCPTimedOutError\nfrom twisted.web.client import ResponseFailed\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.response import response_status_message\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\nfrom scrapy.utils.python import global_object_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryMiddleware(object):\n\n # IOError is raised by the HttpCompression middleware when trying to\n # decompress an empty response\n EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,\n ConnectionRefusedError, ConnectionDone, ConnectError,\n ConnectionLost, TCPTimedOutError, ResponseFailed,\n IOError, TunnelError)\n\n def __init__(self, settings):\n if not settings.getbool('RETRY_ENABLED'):\n raise NotConfigured\n self.max_retry_times = settings.getint('RETRY_TIMES')\n self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_retry', False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider) or response\n return response\n\n def process_exception(self, request, exception, spider):\n if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \\\n and not request.meta.get('dont_retry', False):\n return self._retry(request, exception, spider)\n\n def _retry(self, request, reason, spider):\n retries = request.meta.get('retry_times', 0) + 1\n\n retry_times = self.max_retry_times\n\n if 'max_retry_times' in request.meta:\n retry_times = request.meta['max_retry_times']\n\n stats = spider.crawler.stats\n if retries <= retry_times:\n logger.debug(\"Retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n retryreq = 
request.copy()\n retryreq.meta['retry_times'] = retries\n retryreq.dont_filter = True\n retryreq.priority = request.priority + self.priority_adjust\n\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value('retry/count')\n stats.inc_value('retry/reason_count/%s' % reason)\n return retryreq\n else:\n stats.inc_value('retry/max_reached')\n logger.debug(\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n", "path": "scrapy/downloadermiddlewares/retry.py"}]} | 1,259 | 145 |
gh_patches_debug_23607 | rasdani/github-patches | git_diff | vaexio__vaex-217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pandas dependency
We now depends on Pandas:
https://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py
Introduced in https://github.com/vaexio/vaex/pull/192
We should not depend on pandas: it is not a dependency of vaex-core and should not become one, and we might also grow too large to run on AWS Lambda.
--- END ISSUE ---
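As a rough, pandas-free sketch of the kind of formatting involved (an assumption about the direction of a fix, not vaex's actual implementation):
```python
# Minimal sketch: format numpy datetime64/timedelta64 values using only
# numpy and the standard library, covering the NaT case explicitly.
import datetime

import numpy as np


def format_datetime64(value):
    if np.isnat(value):
        return "NaT"
    return " ".join(str(value).split("T"))


def format_timedelta64(value):
    if np.isnat(value):
        return "NaT"
    td = datetime.timedelta(seconds=value / np.timedelta64(1, "s"))
    return str(td)


print(format_datetime64(np.datetime64("2019-01-02T03:04:05")))  # 2019-01-02 03:04:05
print(format_timedelta64(np.timedelta64(90, "m")))              # 1:30:00
```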
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/formatting.py`
Content:
```
1 import numpy as np
2 import numbers
3 import six
4 import pandas as pd
5
6
7 MAX_LENGTH = 50
8
9
10 def _format_value(value):
11 if isinstance(value, six.string_types):
12 value = str(value)
13 elif isinstance(value, bytes):
14 value = repr(value)
15 elif isinstance(value, np.ma.core.MaskedConstant):
16 value = str(value)
17 if isinstance(value, np.datetime64):
18 value = str(pd.to_datetime(value))
19 if isinstance(value, np.timedelta64):
20 value = str(pd.to_timedelta(value))
21 elif not isinstance(value, numbers.Number):
22 value = str(value)
23 if isinstance(value, float):
24 value = repr(value)
25 if isinstance(value, (str, bytes)):
26 if len(value) > MAX_LENGTH:
27 value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
28 return value
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py
--- a/packages/vaex-core/vaex/formatting.py
+++ b/packages/vaex-core/vaex/formatting.py
@@ -1,7 +1,7 @@
import numpy as np
import numbers
import six
-import pandas as pd
+import datetime
MAX_LENGTH = 50
@@ -15,9 +15,24 @@
elif isinstance(value, np.ma.core.MaskedConstant):
value = str(value)
if isinstance(value, np.datetime64):
- value = str(pd.to_datetime(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ value = ' '.join(str(value).split('T'))
if isinstance(value, np.timedelta64):
- value = str(pd.to_timedelta(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))
+ ms = tmp.microseconds
+ s = np.mod(tmp.seconds, 60)
+ m = np.mod(tmp.seconds//60, 60)
+ h = tmp.seconds // 3600
+ d = tmp.days
+ if ms:
+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))
+ else:
+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))
elif not isinstance(value, numbers.Number):
value = str(value)
if isinstance(value, float):
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py\n--- a/packages/vaex-core/vaex/formatting.py\n+++ b/packages/vaex-core/vaex/formatting.py\n@@ -1,7 +1,7 @@\n import numpy as np\n import numbers\n import six\n-import pandas as pd\n+import datetime\n \n \n MAX_LENGTH = 50\n@@ -15,9 +15,24 @@\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n- value = str(pd.to_datetime(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n- value = str(pd.to_timedelta(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n+ ms = tmp.microseconds\n+ s = np.mod(tmp.seconds, 60)\n+ m = np.mod(tmp.seconds//60, 60)\n+ h = tmp.seconds // 3600\n+ d = tmp.days\n+ if ms:\n+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n+ else:\n+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n", "issue": "Pandas dependency\nWe now depends on Pandas:\r\nhttps://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py\r\n\r\nIntroduced in https://github.com/vaexio/vaex/pull/192\r\n\r\nWe should not depend on pandas, it is not a dependency of vaex-core and should not become, we might also grow to large to run on AWS Lambda.\n", "before_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport pandas as pd\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n value = str(pd.to_datetime(value))\n if isinstance(value, np.timedelta64):\n value = str(pd.to_timedelta(value))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}], "after_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport datetime\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n if np.isnat(value):\n value = 'NaT'\n else:\n value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n if np.isnat(value):\n value = 'NaT'\n else:\n tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n ms = tmp.microseconds\n s = np.mod(tmp.seconds, 60)\n m = np.mod(tmp.seconds//60, 60)\n h = tmp.seconds // 3600\n d = tmp.days\n if ms:\n value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n else:\n value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}]} | 622 | 387 |
gh_patches_debug_23234 | rasdani/github-patches | git_diff | streamlink__streamlink-1394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
July 17 Douyu.com error
0.7.0
streamlink https://www.douyu.com/17732 source -o "PATH & FILENAME"
[cli][info] Found matching plugin douyutv for URL https://www.douyu.com/17732
error: Unable to open URL: https://www.douyu.com/lapi/live/getPlay/17732 (500 Server Error: Internal Server Error for url: https://www.douyu.com/lapi/live/getPlay/17732)
@fozzysec @steven7851
--- END ISSUE ---
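For context, here is a hedged sketch of how the plugin builds its signed API request; the constants come from the plugin source quoted later in this record and may no longer be accepted by the server, which is what this report suggests.
```python
# Illustrative only: reproduces the request-signing scheme used by the
# plugin below. Secret, parameters and endpoint are taken from that source
# and are assumptions as far as the live service is concerned.
import hashlib
import time

API_SECRET = "Y237pxTx2In5ayGz"
channel = 17732
ts = int(time.time())
suffix = "room/{0}?aid=androidhd1&cdn=ws&client_sys=android&time={1}".format(channel, ts)
sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()
url = "https://capi.douyucdn.cn/api/v1/{0}&auth={1}".format(suffix, sign)
print(url)
```
A 500 response, as in the log above, usually means the secret, user agent, or query parameters have changed upstream.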
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/douyutv.py`
Content:
```
1 import re
2 import time
3 import hashlib
4
5 from requests.adapters import HTTPAdapter
6
7 from streamlink.plugin import Plugin
8 from streamlink.plugin.api import http, validate, useragents
9 from streamlink.stream import HTTPStream, HLSStream, RTMPStream
10
11 API_URL = "https://capi.douyucdn.cn/api/v1/{0}&auth={1}"
12 VAPI_URL = "https://vmobile.douyu.com/video/getInfo?vid={0}"
13 API_SECRET = "Y237pxTx2In5ayGz"
14 SHOW_STATUS_ONLINE = 1
15 SHOW_STATUS_OFFLINE = 2
16 STREAM_WEIGHTS = {
17 "low": 540,
18 "medium": 720,
19 "source": 1080
20 }
21
22 _url_re = re.compile(r"""
23 http(s)?://
24 (?:
25 (?P<subdomain>.+)
26 \.
27 )?
28 douyu.com/
29 (?:
30 show/(?P<vid>[^/&?]+)|
31 (?P<channel>[^/&?]+)
32 )
33 """, re.VERBOSE)
34
35 _room_id_re = re.compile(r'"room_id\\*"\s*:\s*(\d+),')
36 _room_id_alt_re = re.compile(r'data-onlineid=(\d+)')
37
38 _room_id_schema = validate.Schema(
39 validate.all(
40 validate.transform(_room_id_re.search),
41 validate.any(
42 None,
43 validate.all(
44 validate.get(1),
45 validate.transform(int)
46 )
47 )
48 )
49 )
50
51 _room_id_alt_schema = validate.Schema(
52 validate.all(
53 validate.transform(_room_id_alt_re.search),
54 validate.any(
55 None,
56 validate.all(
57 validate.get(1),
58 validate.transform(int)
59 )
60 )
61 )
62 )
63
64 _room_schema = validate.Schema(
65 {
66 "data": validate.any(None, {
67 "show_status": validate.all(
68 validate.text,
69 validate.transform(int)
70 ),
71 "rtmp_url": validate.text,
72 "rtmp_live": validate.text,
73 "hls_url": validate.text,
74 "rtmp_multi_bitrate": validate.all(
75 validate.any([], {
76 validate.text: validate.text
77 }),
78 validate.transform(dict)
79 )
80 })
81 },
82 validate.get("data")
83 )
84
85 _vapi_schema = validate.Schema(
86 {
87 "data": validate.any(None, {
88 "video_url": validate.text
89 })
90 },
91 validate.get("data")
92 )
93
94
95 class Douyutv(Plugin):
96 @classmethod
97 def can_handle_url(cls, url):
98 return _url_re.match(url)
99
100 @classmethod
101 def stream_weight(cls, stream):
102 if stream in STREAM_WEIGHTS:
103 return STREAM_WEIGHTS[stream], "douyutv"
104 return Plugin.stream_weight(stream)
105
106 def _get_streams(self):
107 match = _url_re.match(self.url)
108 subdomain = match.group("subdomain")
109
110 http.verify = False
111 http.mount('https://', HTTPAdapter(max_retries=99))
112
113 if subdomain == 'v':
114 vid = match.group("vid")
115 headers = {
116 "User-Agent": useragents.ANDROID,
117 "X-Requested-With": "XMLHttpRequest"
118 }
119 res = http.get(VAPI_URL.format(vid), headers=headers)
120 room = http.json(res, schema=_vapi_schema)
121 yield "source", HLSStream(self.session, room["video_url"])
122 return
123
124 channel = match.group("channel")
125 try:
126 channel = int(channel)
127 except ValueError:
128 channel = http.get(self.url, schema=_room_id_schema)
129 if channel is None:
130 channel = http.get(self.url, schema=_room_id_alt_schema)
131
132 http.headers.update({'User-Agent': useragents.ANDROID})
133 cdns = ["ws", "tct", "ws2", "dl"]
134 ts = int(time.time())
135 suffix = "room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}".format(channel, cdns[0], ts)
136 sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()
137
138 res = http.get(API_URL.format(suffix, sign))
139 room = http.json(res, schema=_room_schema)
140 if not room:
141 self.logger.info("Not a valid room url.")
142 return
143
144 if room["show_status"] != SHOW_STATUS_ONLINE:
145 self.logger.info("Stream currently unavailable.")
146 return
147
148 url = room["hls_url"]
149 yield "source", HLSStream(self.session, url)
150
151 url = "{room[rtmp_url]}/{room[rtmp_live]}".format(room=room)
152 if 'rtmp:' in url:
153 stream = RTMPStream(self.session, {
154 "rtmp": url,
155 "live": True
156 })
157 yield "source", stream
158 else:
159 yield "source", HTTPStream(self.session, url)
160
161 multi_streams = {
162 "middle": "low",
163 "middle2": "medium"
164 }
165 for name, url in room["rtmp_multi_bitrate"].items():
166 url = "{room[rtmp_url]}/{url}".format(room=room, url=url)
167 name = multi_streams[name]
168 if 'rtmp:' in url:
169 stream = RTMPStream(self.session, {
170 "rtmp": url,
171 "live": True
172 })
173 yield name, stream
174 else:
175 yield name, HTTPStream(self.session, url)
176
177
178 __plugin__ = Douyutv
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/douyutv.py b/src/streamlink/plugins/douyutv.py
--- a/src/streamlink/plugins/douyutv.py
+++ b/src/streamlink/plugins/douyutv.py
@@ -10,7 +10,7 @@
API_URL = "https://capi.douyucdn.cn/api/v1/{0}&auth={1}"
VAPI_URL = "https://vmobile.douyu.com/video/getInfo?vid={0}"
-API_SECRET = "Y237pxTx2In5ayGz"
+API_SECRET = "zNzMV1y4EMxOHS6I5WKm"
SHOW_STATUS_ONLINE = 1
SHOW_STATUS_OFFLINE = 2
STREAM_WEIGHTS = {
@@ -129,10 +129,10 @@
if channel is None:
channel = http.get(self.url, schema=_room_id_alt_schema)
- http.headers.update({'User-Agent': useragents.ANDROID})
+ http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})
cdns = ["ws", "tct", "ws2", "dl"]
ts = int(time.time())
- suffix = "room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}".format(channel, cdns[0], ts)
+ suffix = "room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}".format(channel, cdns[0], ts)
sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()
res = http.get(API_URL.format(suffix, sign))
| {"golden_diff": "diff --git a/src/streamlink/plugins/douyutv.py b/src/streamlink/plugins/douyutv.py\n--- a/src/streamlink/plugins/douyutv.py\n+++ b/src/streamlink/plugins/douyutv.py\n@@ -10,7 +10,7 @@\n \n API_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\n VAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\n-API_SECRET = \"Y237pxTx2In5ayGz\"\n+API_SECRET = \"zNzMV1y4EMxOHS6I5WKm\"\n SHOW_STATUS_ONLINE = 1\n SHOW_STATUS_OFFLINE = 2\n STREAM_WEIGHTS = {\n@@ -129,10 +129,10 @@\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n \n- http.headers.update({'User-Agent': useragents.ANDROID})\n+ http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n- suffix = \"room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}\".format(channel, cdns[0], ts)\n+ suffix = \"room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n \n res = http.get(API_URL.format(suffix, sign))\n", "issue": "July 17 Douyu.com error\n0.7.0\r\nstreamlink https://www.douyu.com/17732 source -o \"PATH & FILENAME\"\r\n[cli][info] Found matching plugin douyutv for URL https://www.douyu.com/17732\r\nerror: Unable to open URL: https://www.douyu.com/lapi/live/getPlay/17732 (500 Server Error: Internal Server Error for url: https://www.douyu.com/lapi/live/getPlay/17732)\r\n@fozzysec @steven7851\n", "before_files": [{"content": "import re\nimport time\nimport hashlib\n\nfrom requests.adapters import HTTPAdapter\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate, useragents\nfrom streamlink.stream import HTTPStream, HLSStream, RTMPStream\n\nAPI_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\nVAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\nAPI_SECRET = \"Y237pxTx2In5ayGz\"\nSHOW_STATUS_ONLINE = 1\nSHOW_STATUS_OFFLINE = 2\nSTREAM_WEIGHTS = {\n \"low\": 540,\n \"medium\": 720,\n \"source\": 1080\n }\n\n_url_re = re.compile(r\"\"\"\n http(s)?://\n (?:\n (?P<subdomain>.+)\n \\.\n )?\n douyu.com/\n (?:\n show/(?P<vid>[^/&?]+)|\n (?P<channel>[^/&?]+)\n )\n\"\"\", re.VERBOSE)\n\n_room_id_re = re.compile(r'\"room_id\\\\*\"\\s*:\\s*(\\d+),')\n_room_id_alt_re = re.compile(r'data-onlineid=(\\d+)')\n\n_room_id_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_id_alt_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_alt_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"show_status\": validate.all(\n validate.text,\n validate.transform(int)\n ),\n \"rtmp_url\": validate.text,\n \"rtmp_live\": validate.text,\n \"hls_url\": validate.text,\n \"rtmp_multi_bitrate\": validate.all(\n validate.any([], {\n validate.text: validate.text\n }),\n validate.transform(dict)\n )\n })\n },\n validate.get(\"data\")\n)\n\n_vapi_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"video_url\": validate.text\n })\n },\n validate.get(\"data\")\n)\n\n\nclass Douyutv(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream in STREAM_WEIGHTS:\n return STREAM_WEIGHTS[stream], \"douyutv\"\n return 
Plugin.stream_weight(stream)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n subdomain = match.group(\"subdomain\")\n\n http.verify = False\n http.mount('https://', HTTPAdapter(max_retries=99))\n\n if subdomain == 'v':\n vid = match.group(\"vid\")\n headers = {\n \"User-Agent\": useragents.ANDROID,\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n res = http.get(VAPI_URL.format(vid), headers=headers)\n room = http.json(res, schema=_vapi_schema)\n yield \"source\", HLSStream(self.session, room[\"video_url\"])\n return\n\n channel = match.group(\"channel\")\n try:\n channel = int(channel)\n except ValueError:\n channel = http.get(self.url, schema=_room_id_schema)\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n\n http.headers.update({'User-Agent': useragents.ANDROID})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n suffix = \"room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n\n res = http.get(API_URL.format(suffix, sign))\n room = http.json(res, schema=_room_schema)\n if not room:\n self.logger.info(\"Not a valid room url.\")\n return\n\n if room[\"show_status\"] != SHOW_STATUS_ONLINE:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n url = room[\"hls_url\"]\n yield \"source\", HLSStream(self.session, url)\n\n url = \"{room[rtmp_url]}/{room[rtmp_live]}\".format(room=room)\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield \"source\", stream\n else:\n yield \"source\", HTTPStream(self.session, url)\n\n multi_streams = {\n \"middle\": \"low\",\n \"middle2\": \"medium\"\n }\n for name, url in room[\"rtmp_multi_bitrate\"].items():\n url = \"{room[rtmp_url]}/{url}\".format(room=room, url=url)\n name = multi_streams[name]\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield name, stream\n else:\n yield name, HTTPStream(self.session, url)\n\n\n__plugin__ = Douyutv\n", "path": "src/streamlink/plugins/douyutv.py"}], "after_files": [{"content": "import re\nimport time\nimport hashlib\n\nfrom requests.adapters import HTTPAdapter\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate, useragents\nfrom streamlink.stream import HTTPStream, HLSStream, RTMPStream\n\nAPI_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\nVAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\nAPI_SECRET = \"zNzMV1y4EMxOHS6I5WKm\"\nSHOW_STATUS_ONLINE = 1\nSHOW_STATUS_OFFLINE = 2\nSTREAM_WEIGHTS = {\n \"low\": 540,\n \"medium\": 720,\n \"source\": 1080\n }\n\n_url_re = re.compile(r\"\"\"\n http(s)?://\n (?:\n (?P<subdomain>.+)\n \\.\n )?\n douyu.com/\n (?:\n show/(?P<vid>[^/&?]+)|\n (?P<channel>[^/&?]+)\n )\n\"\"\", re.VERBOSE)\n\n_room_id_re = re.compile(r'\"room_id\\\\*\"\\s*:\\s*(\\d+),')\n_room_id_alt_re = re.compile(r'data-onlineid=(\\d+)')\n\n_room_id_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_id_alt_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_alt_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"show_status\": validate.all(\n validate.text,\n 
validate.transform(int)\n ),\n \"rtmp_url\": validate.text,\n \"rtmp_live\": validate.text,\n \"hls_url\": validate.text,\n \"rtmp_multi_bitrate\": validate.all(\n validate.any([], {\n validate.text: validate.text\n }),\n validate.transform(dict)\n )\n })\n },\n validate.get(\"data\")\n)\n\n_vapi_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"video_url\": validate.text\n })\n },\n validate.get(\"data\")\n)\n\n\nclass Douyutv(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream in STREAM_WEIGHTS:\n return STREAM_WEIGHTS[stream], \"douyutv\"\n return Plugin.stream_weight(stream)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n subdomain = match.group(\"subdomain\")\n\n http.verify = False\n http.mount('https://', HTTPAdapter(max_retries=99))\n\n if subdomain == 'v':\n vid = match.group(\"vid\")\n headers = {\n \"User-Agent\": useragents.ANDROID,\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n res = http.get(VAPI_URL.format(vid), headers=headers)\n room = http.json(res, schema=_vapi_schema)\n yield \"source\", HLSStream(self.session, room[\"video_url\"])\n return\n\n channel = match.group(\"channel\")\n try:\n channel = int(channel)\n except ValueError:\n channel = http.get(self.url, schema=_room_id_schema)\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n\n http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n suffix = \"room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n\n res = http.get(API_URL.format(suffix, sign))\n room = http.json(res, schema=_room_schema)\n if not room:\n self.logger.info(\"Not a valid room url.\")\n return\n\n if room[\"show_status\"] != SHOW_STATUS_ONLINE:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n url = room[\"hls_url\"]\n yield \"source\", HLSStream(self.session, url)\n\n url = \"{room[rtmp_url]}/{room[rtmp_live]}\".format(room=room)\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield \"source\", stream\n else:\n yield \"source\", HTTPStream(self.session, url)\n\n multi_streams = {\n \"middle\": \"low\",\n \"middle2\": \"medium\"\n }\n for name, url in room[\"rtmp_multi_bitrate\"].items():\n url = \"{room[rtmp_url]}/{url}\".format(room=room, url=url)\n name = multi_streams[name]\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield name, stream\n else:\n yield name, HTTPStream(self.session, url)\n\n\n__plugin__ = Douyutv\n", "path": "src/streamlink/plugins/douyutv.py"}]} | 2,033 | 369 |
gh_patches_debug_8623 | rasdani/github-patches | git_diff | archlinux__archinstall-262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Awesome profile installation failed with no such file or directory on xdg-mime


Resolve #261 and related issues
Closes #262.
🚨 PR Guidelines:
# New features *(v2.2.0)*
Merge new features in to `torxed-v2.2.0`.<br>
This branch is designated for potential breaking changes, added complexity and new functionality.
# Bug fixes *(v2.1.4)*
Merge against `master` for bug fixes and anything that improves stability and quality of life.<br>
This excludes:
* New functionality
* Added complexity
* Breaking changes
Any changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.
# Describe your PR
If the changes has been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>
If the PR is larger than ~20 lines, please describe it here unless described in an issue.
# Testing
Any new feature or stability improvement should be tested if possible.
Please follow the test instructions at the bottom of the README.
*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/desktop.py`
Content:
```
1 # A desktop environment selector.
2
3 import archinstall, os
4
5 is_top_level_profile = True
6
7 # New way of defining packages for a profile, which is iterable and can be used out side
8 # of the profile to get a list of "what packages will be installed".
9 __packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
10
11 def _prep_function(*args, **kwargs):
12 """
13 Magic function called by the importing installer
14 before continuing any further. It also avoids executing any
15 other code in this stage. So it's a safe way to ask the user
16 for more input before any other installer steps start.
17 """
18
19 supported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']
20 desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
21
22 # Temporarily store the selected desktop profile
23 # in a session-safe location, since this module will get reloaded
24 # the next time it gets executed.
25 archinstall.storage['_desktop_profile'] = desktop
26
27 profile = archinstall.Profile(None, desktop)
28 # Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.
29 with profile.load_instructions(namespace=f"{desktop}.py") as imported:
30 if hasattr(imported, '_prep_function'):
31 return imported._prep_function()
32 else:
33 print(f"Deprecated (??): {desktop} profile has no _prep_function() anymore")
34
35 if __name__ == 'desktop':
36 """
37 This "profile" is a meta-profile.
38 There are no desktop-specific steps, it simply routes
39 the installer to whichever desktop environment/window manager was chosen.
40
41 Maybe in the future, a network manager or similar things *could* be added here.
42 We should honor that Arch Linux does not officially endorse a desktop-setup, nor is
43 it trying to be a turn-key desktop distribution.
44
45 There are plenty of desktop-turn-key-solutions based on Arch Linux,
46 this is therefore just a helper to get started
47 """
48
49 # Install common packages for all desktop environments
50 installation.add_additional_packages(__packages__)
51
52 # TODO: Remove magic variable 'installation' and place it
53 # in archinstall.storage or archinstall.session/archinstall.installation
54 installation.install_profile(archinstall.storage['_desktop_profile'])
55
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/profiles/desktop.py b/profiles/desktop.py
--- a/profiles/desktop.py
+++ b/profiles/desktop.py
@@ -6,7 +6,7 @@
# New way of defining packages for a profile, which is iterable and can be used out side
# of the profile to get a list of "what packages will be installed".
-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']
def _prep_function(*args, **kwargs):
"""
| {"golden_diff": "diff --git a/profiles/desktop.py b/profiles/desktop.py\n--- a/profiles/desktop.py\n+++ b/profiles/desktop.py\n@@ -6,7 +6,7 @@\n \n # New way of defining packages for a profile, which is iterable and can be used out side\n # of the profile to get a list of \"what packages will be installed\".\n-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n", "issue": "Awesome profile installation failed with no such file or directory on xdg-mime\n\r\n\r\n\nResolve #261 and related issues\nCloses #262.\r\n\r\n\ud83d\udea8 PR Guidelines:\r\n\r\n# New features *(v2.2.0)*\r\n\r\nMerge new features in to `torxed-v2.2.0`.<br>\r\nThis branch is designated for potential breaking changes, added complexity and new functionality.\r\n\r\n# Bug fixes *(v2.1.4)*\r\n\r\nMerge against `master` for bug fixes and anything that improves stability and quality of life.<br>\r\nThis excludes:\r\n * New functionality\r\n * Added complexity\r\n * Breaking changes\r\n\r\nAny changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.\r\n\r\n# Describe your PR\r\n\r\nIf the changes has been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>\r\nIf the PR is larger than ~20 lines, please describe it here unless described in an issue.\r\n\r\n# Testing\r\n\r\nAny new feature or stability improvement should be tested if possible.\r\nPlease follow the test instructions at the bottom of the README.\r\n\r\n*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*\r\n\n", "before_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}], "after_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}]} | 1,367 | 178 |
gh_patches_debug_31470 | rasdani/github-patches | git_diff | cowrie__cowrie-1093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
greynoise should catch timeout error
```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:
2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]
Traceback (most recent call last):
--- <exception caught here> ---
File "/home/cowrie/cowrie/src/cowrie/output/greynoise.py", line 65, in scanip
headers=headers)
twisted.internet.error.TimeoutError: User timeout caused connection failure.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/output/greynoise.py`
Content:
```
1 """
2 Send attackers IP to GreyNoise
3 """
4
5 from __future__ import absolute_import, division
6
7 import treq
8
9 from twisted.internet import defer
10 from twisted.python import log
11
12 import cowrie.core.output
13 from cowrie.core.config import CONFIG
14
15 COWRIE_USER_AGENT = 'Cowrie Honeypot'
16 GNAPI_URL = 'http://api.greynoise.io:8888/v1/'
17
18
19 class Output(cowrie.core.output.Output):
20
21 def __init__(self):
22 self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)
23 self.tags = CONFIG.get('output_greynoise', 'tags', fallback="all").split(",")
24 self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)
25 cowrie.core.output.Output.__init__(self)
26
27 def start(self):
28 """
29 Start output plugin
30 """
31
32 def stop(self):
33 """
34 Stop output plugin
35 """
36 pass
37
38 def write(self, entry):
39 if entry['eventid'] == "cowrie.session.connect":
40 self.scanip(entry)
41
42 @defer.inlineCallbacks
43 def scanip(self, entry):
44 """
45 Scan IP againt Greynoise API
46 """
47 def message(query):
48 log.msg(
49 eventid='cowrie.greynoise.result',
50 format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'
51 ' along with the following %(meta)s metadata',
52 IP=entry['src_ip'],
53 tag=query['name'],
54 conf=query['confidence'],
55 meta=query['metadata']
56 )
57
58 gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')
59 headers = ({'User-Agent': [COWRIE_USER_AGENT]})
60 fields = {'key': self.apiKey, 'ip': entry['src_ip']}
61
62 response = yield treq.post(
63 url=gnUrl,
64 data=fields,
65 headers=headers)
66
67 if response.code != 200:
68 rsp = yield response.text()
69 log.error("greynoise: got error {}".format(rsp))
70 return
71
72 j = yield response.json()
73 if self.debug:
74 log.msg("greynoise: debug: "+repr(j))
75 if j['status'] == "ok":
76 if "all" not in self.tags:
77 for query in j['records']:
78 if query['name'] in self.tags:
79 message(query)
80 else:
81 for query in j['records']:
82 message(query)
83 else:
84 log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py
--- a/src/cowrie/output/greynoise.py
+++ b/src/cowrie/output/greynoise.py
@@ -6,7 +6,7 @@
import treq
-from twisted.internet import defer
+from twisted.internet import defer, error
from twisted.python import log
import cowrie.core.output
@@ -59,10 +59,15 @@
headers = ({'User-Agent': [COWRIE_USER_AGENT]})
fields = {'key': self.apiKey, 'ip': entry['src_ip']}
- response = yield treq.post(
- url=gnUrl,
- data=fields,
- headers=headers)
+ try:
+ response = yield treq.post(
+ url=gnUrl,
+ data=fields,
+ headers=headers,
+ timeout=10)
+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):
+ log.msg("GreyNoise requests timeout")
+ return
if response.code != 200:
rsp = yield response.text()
@@ -72,13 +77,14 @@
j = yield response.json()
if self.debug:
log.msg("greynoise: debug: "+repr(j))
- if j['status'] == "ok":
- if "all" not in self.tags:
- for query in j['records']:
- if query['name'] in self.tags:
- message(query)
- else:
- for query in j['records']:
+
+ if j['status'] == "ok":
+ if "all" not in self.tags:
+ for query in j['records']:
+ if query['name'] in self.tags:
message(query)
else:
- log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
+ for query in j['records']:
+ message(query)
+ else:
+ log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
| {"golden_diff": "diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py\n--- a/src/cowrie/output/greynoise.py\n+++ b/src/cowrie/output/greynoise.py\n@@ -6,7 +6,7 @@\n \n import treq\n \n-from twisted.internet import defer\n+from twisted.internet import defer, error\n from twisted.python import log\n \n import cowrie.core.output\n@@ -59,10 +59,15 @@\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n \n- response = yield treq.post(\n- url=gnUrl,\n- data=fields,\n- headers=headers)\n+ try:\n+ response = yield treq.post(\n+ url=gnUrl,\n+ data=fields,\n+ headers=headers,\n+ timeout=10)\n+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):\n+ log.msg(\"GreyNoise requests timeout\")\n+ return\n \n if response.code != 200:\n rsp = yield response.text()\n@@ -72,13 +77,14 @@\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n- if j['status'] == \"ok\":\n- if \"all\" not in self.tags:\n- for query in j['records']:\n- if query['name'] in self.tags:\n- message(query)\n- else:\n- for query in j['records']:\n+\n+ if j['status'] == \"ok\":\n+ if \"all\" not in self.tags:\n+ for query in j['records']:\n+ if query['name'] in self.tags:\n message(query)\n else:\n- log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n+ for query in j['records']:\n+ message(query)\n+ else:\n+ log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "issue": "greynoise should catch timeout error\n```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:\r\n2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]\r\n Traceback (most recent call last):\r\n --- <exception caught here> ---\r\n File \"/home/cowrie/cowrie/src/cowrie/output/greynoise.py\", line 65, in scanip\r\n headers=headers)\r\n twisted.internet.error.TimeoutError: User timeout caused connection failure.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nSend attackers IP to GreyNoise\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport treq\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nCOWRIE_USER_AGENT = 'Cowrie Honeypot'\nGNAPI_URL = 'http://api.greynoise.io:8888/v1/'\n\n\nclass Output(cowrie.core.output.Output):\n\n def __init__(self):\n self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)\n self.tags = CONFIG.get('output_greynoise', 'tags', fallback=\"all\").split(\",\")\n self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)\n cowrie.core.output.Output.__init__(self)\n\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry['eventid'] == \"cowrie.session.connect\":\n self.scanip(entry)\n\n @defer.inlineCallbacks\n def scanip(self, entry):\n \"\"\"\n Scan IP againt Greynoise API\n \"\"\"\n def message(query):\n log.msg(\n eventid='cowrie.greynoise.result',\n format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'\n ' along with the following %(meta)s metadata',\n IP=entry['src_ip'],\n tag=query['name'],\n conf=query['confidence'],\n meta=query['metadata']\n )\n\n gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n\n response = yield treq.post(\n 
url=gnUrl,\n data=fields,\n headers=headers)\n\n if response.code != 200:\n rsp = yield response.text()\n log.error(\"greynoise: got error {}\".format(rsp))\n return\n\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n if j['status'] == \"ok\":\n if \"all\" not in self.tags:\n for query in j['records']:\n if query['name'] in self.tags:\n message(query)\n else:\n for query in j['records']:\n message(query)\n else:\n log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "path": "src/cowrie/output/greynoise.py"}], "after_files": [{"content": "\"\"\"\nSend attackers IP to GreyNoise\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport treq\n\nfrom twisted.internet import defer, error\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nCOWRIE_USER_AGENT = 'Cowrie Honeypot'\nGNAPI_URL = 'http://api.greynoise.io:8888/v1/'\n\n\nclass Output(cowrie.core.output.Output):\n\n def __init__(self):\n self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)\n self.tags = CONFIG.get('output_greynoise', 'tags', fallback=\"all\").split(\",\")\n self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)\n cowrie.core.output.Output.__init__(self)\n\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry['eventid'] == \"cowrie.session.connect\":\n self.scanip(entry)\n\n @defer.inlineCallbacks\n def scanip(self, entry):\n \"\"\"\n Scan IP againt Greynoise API\n \"\"\"\n def message(query):\n log.msg(\n eventid='cowrie.greynoise.result',\n format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'\n ' along with the following %(meta)s metadata',\n IP=entry['src_ip'],\n tag=query['name'],\n conf=query['confidence'],\n meta=query['metadata']\n )\n\n gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n\n try:\n response = yield treq.post(\n url=gnUrl,\n data=fields,\n headers=headers,\n timeout=10)\n except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):\n log.msg(\"GreyNoise requests timeout\")\n return\n\n if response.code != 200:\n rsp = yield response.text()\n log.error(\"greynoise: got error {}\".format(rsp))\n return\n\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n\n if j['status'] == \"ok\":\n if \"all\" not in self.tags:\n for query in j['records']:\n if query['name'] in self.tags:\n message(query)\n else:\n for query in j['records']:\n message(query)\n else:\n log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "path": "src/cowrie/output/greynoise.py"}]} | 1,158 | 474 |
gh_patches_debug_3627 | rasdani/github-patches | git_diff | ethereum__web3.py-912 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade to or add support for websockets v5
### What was wrong?
We are currently using the `websockets` library's v4 line. The v5 line is out.
### How can it be fixed?
Look into adding support for both v4 and v5.
If this is too cumbersome, we can simply upgrade to requiring `>=v5`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8
9 setup(
10 name='web3',
11 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
12 version='4.3.0',
13 description="""Web3.py""",
14 long_description_markdown_filename='README.md',
15 author='Piper Merriam',
16 author_email='[email protected]',
17 url='https://github.com/ethereum/web3.py',
18 include_package_data=True,
19 install_requires=[
20 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
21 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
22 "eth-abi>=1.1.1,<2",
23 "eth-account>=0.2.1,<0.3.0",
24 "eth-utils>=1.0.1,<2.0.0",
25 "hexbytes>=0.1.0,<1.0.0",
26 "lru-dict>=1.1.6,<2.0.0",
27 "eth-hash[pycryptodome]",
28 "requests>=2.16.0,<3.0.0",
29 "websockets>=4.0.1,<5.0.0",
30 "pypiwin32>=223;platform_system=='Windows'",
31 ],
32 setup_requires=['setuptools-markdown'],
33 python_requires='>=3.5, <4',
34 extras_require={
35 'tester': [
36 "eth-tester[py-evm]==0.1.0-beta.26",
37 "py-geth>=2.0.1,<3.0.0",
38 ],
39 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
40 'linter': [
41 "flake8==3.4.1",
42 "isort>=4.2.15,<5",
43 ],
44 },
45 py_modules=['web3', 'ens'],
46 license="MIT",
47 zip_safe=False,
48 keywords='ethereum',
49 packages=find_packages(exclude=["tests", "tests.*"]),
50 classifiers=[
51 'Development Status :: 5 - Production/Stable',
52 'Intended Audience :: Developers',
53 'License :: OSI Approved :: MIT License',
54 'Natural Language :: English',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 ],
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
"lru-dict>=1.1.6,<2.0.0",
"eth-hash[pycryptodome]",
"requests>=2.16.0,<3.0.0",
- "websockets>=4.0.1,<5.0.0",
+ "websockets>=5.0.1,<6.0.0",
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n- \"websockets>=4.0.1,<5.0.0\",\n+ \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n", "issue": "Upgrade to or add support for websockets v5\n### What was wrong?\r\n\r\nWe are currently using the `websockets` library's v4 line. The v5 line is out.\r\n\r\n### How can it be fixed?\r\n\r\nLook into adding support for both v4 and v5.\r\n\r\nIf this is too cumbersome, we can simply upgrade to requiring `>=v5`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.3.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.3.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=4.0.1,<5.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.26\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='4.3.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.3.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.26\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 1,011 | 142 |
gh_patches_debug_30860 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2032 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mergesort Update Variable Names
I was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. Does anyone know enough about this file to improve the variable names and make the code more understandable?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `divide_and_conquer/mergesort.py`
Content:
```
1 def merge(a, b, m, e):
2 l = a[b : m + 1] # noqa: E741
3 r = a[m + 1 : e + 1]
4 k = b
5 i = 0
6 j = 0
7 while i < len(l) and j < len(r):
8 # change sign for Descending order
9 if l[i] < r[j]:
10 a[k] = l[i]
11 i += 1
12 else:
13 a[k] = r[j]
14 j += 1
15 k += 1
16 while i < len(l):
17 a[k] = l[i]
18 i += 1
19 k += 1
20 while j < len(r):
21 a[k] = r[j]
22 j += 1
23 k += 1
24 return a
25
26
27 def mergesort(a, b, e):
28 """
29 >>> mergesort([3,2,1],0,2)
30 [1, 2, 3]
31 >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
32 [0, 1, 1, 2, 2, 3, 3, 4, 5]
33 """
34 if b < e:
35 m = (b + e) // 2
36 # print("ms1",a,b,m)
37 mergesort(a, b, m)
38 # print("ms2",a,m+1,e)
39 mergesort(a, m + 1, e)
40 # print("m",a,b,m,e)
41 merge(a, b, m, e)
42 return a
43
44
45 if __name__ == "__main__":
46 import doctest
47
48 doctest.testmod()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py
--- a/divide_and_conquer/mergesort.py
+++ b/divide_and_conquer/mergesort.py
@@ -1,45 +1,48 @@
-def merge(a, b, m, e):
- l = a[b : m + 1] # noqa: E741
- r = a[m + 1 : e + 1]
- k = b
+def merge(arr, left, mid, right):
+ # overall array will divided into 2 array
+ # left_arr contains the left portion of array from left to mid
+ # right_arr contains the right portion of array from mid + 1 to right
+ left_arr = arr[left : mid + 1]
+ right_arr = arr[mid + 1 : right + 1]
+ k = left
i = 0
j = 0
- while i < len(l) and j < len(r):
+ while i < len(left_arr) and j < len(right_arr):
# change sign for Descending order
- if l[i] < r[j]:
- a[k] = l[i]
+ if left_arr[i] < right_arr[j]:
+ arr[k] = left_arr[i]
i += 1
else:
- a[k] = r[j]
+ arr[k] = right_arr[j]
j += 1
k += 1
- while i < len(l):
- a[k] = l[i]
+ while i < len(left_arr):
+ arr[k] = left_arr[i]
i += 1
k += 1
- while j < len(r):
- a[k] = r[j]
+ while j < len(right_arr):
+ arr[k] = right_arr[j]
j += 1
k += 1
- return a
+ return arr
-def mergesort(a, b, e):
+def mergesort(arr, left, right):
"""
- >>> mergesort([3,2,1],0,2)
+ >>> mergesort([3, 2, 1], 0, 2)
[1, 2, 3]
- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)
[0, 1, 1, 2, 2, 3, 3, 4, 5]
"""
- if b < e:
- m = (b + e) // 2
+ if left < right:
+ mid = (left + right) // 2
# print("ms1",a,b,m)
- mergesort(a, b, m)
+ mergesort(arr, left, mid)
# print("ms2",a,m+1,e)
- mergesort(a, m + 1, e)
+ mergesort(arr, mid + 1, right)
# print("m",a,b,m,e)
- merge(a, b, m, e)
- return a
+ merge(arr, left, mid, right)
+ return arr
if __name__ == "__main__":
| {"golden_diff": "diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py\n--- a/divide_and_conquer/mergesort.py\n+++ b/divide_and_conquer/mergesort.py\n@@ -1,45 +1,48 @@\n-def merge(a, b, m, e):\n- l = a[b : m + 1] # noqa: E741\n- r = a[m + 1 : e + 1]\n- k = b\n+def merge(arr, left, mid, right):\n+ # overall array will divided into 2 array\n+ # left_arr contains the left portion of array from left to mid\n+ # right_arr contains the right portion of array from mid + 1 to right\n+ left_arr = arr[left : mid + 1]\n+ right_arr = arr[mid + 1 : right + 1]\n+ k = left\n i = 0\n j = 0\n- while i < len(l) and j < len(r):\n+ while i < len(left_arr) and j < len(right_arr):\n # change sign for Descending order\n- if l[i] < r[j]:\n- a[k] = l[i]\n+ if left_arr[i] < right_arr[j]:\n+ arr[k] = left_arr[i]\n i += 1\n else:\n- a[k] = r[j]\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- while i < len(l):\n- a[k] = l[i]\n+ while i < len(left_arr):\n+ arr[k] = left_arr[i]\n i += 1\n k += 1\n- while j < len(r):\n- a[k] = r[j]\n+ while j < len(right_arr):\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- return a\n+ return arr\n \n \n-def mergesort(a, b, e):\n+def mergesort(arr, left, right):\n \"\"\"\n- >>> mergesort([3,2,1],0,2)\n+ >>> mergesort([3, 2, 1], 0, 2)\n [1, 2, 3]\n- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n- if b < e:\n- m = (b + e) // 2\n+ if left < right:\n+ mid = (left + right) // 2\n # print(\"ms1\",a,b,m)\n- mergesort(a, b, m)\n+ mergesort(arr, left, mid)\n # print(\"ms2\",a,m+1,e)\n- mergesort(a, m + 1, e)\n+ mergesort(arr, mid + 1, right)\n # print(\"m\",a,b,m,e)\n- merge(a, b, m, e)\n- return a\n+ merge(arr, left, mid, right)\n+ return arr\n \n \n if __name__ == \"__main__\":\n", "issue": "Mergesort Update Variable Names\nI was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. 
Does anyone know enough about this file to improve the variable names and make the code more understandable?\n", "before_files": [{"content": "def merge(a, b, m, e):\n l = a[b : m + 1] # noqa: E741\n r = a[m + 1 : e + 1]\n k = b\n i = 0\n j = 0\n while i < len(l) and j < len(r):\n # change sign for Descending order\n if l[i] < r[j]:\n a[k] = l[i]\n i += 1\n else:\n a[k] = r[j]\n j += 1\n k += 1\n while i < len(l):\n a[k] = l[i]\n i += 1\n k += 1\n while j < len(r):\n a[k] = r[j]\n j += 1\n k += 1\n return a\n\n\ndef mergesort(a, b, e):\n \"\"\"\n >>> mergesort([3,2,1],0,2)\n [1, 2, 3]\n >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n if b < e:\n m = (b + e) // 2\n # print(\"ms1\",a,b,m)\n mergesort(a, b, m)\n # print(\"ms2\",a,m+1,e)\n mergesort(a, m + 1, e)\n # print(\"m\",a,b,m,e)\n merge(a, b, m, e)\n return a\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "divide_and_conquer/mergesort.py"}], "after_files": [{"content": "def merge(arr, left, mid, right):\n # overall array will divided into 2 array\n # left_arr contains the left portion of array from left to mid\n # right_arr contains the right portion of array from mid + 1 to right\n left_arr = arr[left : mid + 1]\n right_arr = arr[mid + 1 : right + 1]\n k = left\n i = 0\n j = 0\n while i < len(left_arr) and j < len(right_arr):\n # change sign for Descending order\n if left_arr[i] < right_arr[j]:\n arr[k] = left_arr[i]\n i += 1\n else:\n arr[k] = right_arr[j]\n j += 1\n k += 1\n while i < len(left_arr):\n arr[k] = left_arr[i]\n i += 1\n k += 1\n while j < len(right_arr):\n arr[k] = right_arr[j]\n j += 1\n k += 1\n return arr\n\n\ndef mergesort(arr, left, right):\n \"\"\"\n >>> mergesort([3, 2, 1], 0, 2)\n [1, 2, 3]\n >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n if left < right:\n mid = (left + right) // 2\n # print(\"ms1\",a,b,m)\n mergesort(arr, left, mid)\n # print(\"ms2\",a,m+1,e)\n mergesort(arr, mid + 1, right)\n # print(\"m\",a,b,m,e)\n merge(arr, left, mid, right)\n return arr\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "divide_and_conquer/mergesort.py"}]} | 807 | 773 |
gh_patches_debug_13420 | rasdani/github-patches | git_diff | comic__grand-challenge.org-786 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Partial update not working correctly because of custom validation
The validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.
If you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError.
This should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist because it will then either not change (for partial update request) or throw a `field is required` validation error (for every other type of request).
I will fix this and add a test for it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/annotations/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from .models import (
4 ETDRSGridAnnotation,
5 MeasurementAnnotation,
6 BooleanClassificationAnnotation,
7 PolygonAnnotationSet,
8 SinglePolygonAnnotation,
9 LandmarkAnnotationSet,
10 SingleLandmarkAnnotation,
11 )
12 from .validators import validate_grader_is_current_retina_user
13
14
15 class AbstractAnnotationSerializer(serializers.ModelSerializer):
16 def validate_grader(self, value):
17 """
18 Validate that grader is the user creating the object for retina_graders group
19 """
20 validate_grader_is_current_retina_user(value, self.context)
21 return value
22
23 class Meta:
24 abstract = True
25
26
27 class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
28 def validate(self, data):
29 """
30 Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
31 """
32 validate_grader_is_current_retina_user(
33 data["annotation_set"].grader, self.context
34 )
35 return data
36
37 class Meta:
38 abstract = True
39
40
41 class ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):
42 class Meta:
43 model = ETDRSGridAnnotation
44 fields = ("grader", "created", "image", "fovea", "optic_disk")
45
46
47 class MeasurementAnnotationSerializer(AbstractAnnotationSerializer):
48 class Meta:
49 model = MeasurementAnnotation
50 fields = ("image", "grader", "created", "start_voxel", "end_voxel")
51
52
53 class BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):
54 class Meta:
55 model = BooleanClassificationAnnotation
56 fields = ("image", "grader", "created", "name", "value")
57
58
59 class SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):
60 annotation_set = serializers.PrimaryKeyRelatedField(
61 queryset=PolygonAnnotationSet.objects.all()
62 )
63
64 class Meta:
65 model = SinglePolygonAnnotation
66 fields = ("id", "value", "annotation_set")
67
68
69 class PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):
70 singlepolygonannotation_set = SinglePolygonAnnotationSerializer(
71 many=True, read_only=True
72 )
73
74 class Meta:
75 model = PolygonAnnotationSet
76 fields = (
77 "id",
78 "image",
79 "grader",
80 "created",
81 "name",
82 "singlepolygonannotation_set",
83 )
84
85
86 class LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):
87 class Meta:
88 model = LandmarkAnnotationSet
89 fields = ("grader", "created")
90
91
92 class SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):
93 class Meta:
94 model = SingleLandmarkAnnotation
95 fields = ("image", "annotation_set", "landmarks")
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py
--- a/app/grandchallenge/annotations/serializers.py
+++ b/app/grandchallenge/annotations/serializers.py
@@ -27,11 +27,14 @@
class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
def validate(self, data):
"""
- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
+ Validate that the user that is creating this object equals the
+ annotation_set.grader for retina_graders
"""
- validate_grader_is_current_retina_user(
- data["annotation_set"].grader, self.context
- )
+ if data.get("annotation_set") is None:
+ return data
+
+ grader = data["annotation_set"].grader
+ validate_grader_is_current_retina_user(grader, self.context)
return data
class Meta:
| {"golden_diff": "diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py\n--- a/app/grandchallenge/annotations/serializers.py\n+++ b/app/grandchallenge/annotations/serializers.py\n@@ -27,11 +27,14 @@\n class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n+ Validate that the user that is creating this object equals the\n+ annotation_set.grader for retina_graders\n \"\"\"\n- validate_grader_is_current_retina_user(\n- data[\"annotation_set\"].grader, self.context\n- )\n+ if data.get(\"annotation_set\") is None:\n+ return data\n+\n+ grader = data[\"annotation_set\"].grader\n+ validate_grader_is_current_retina_user(grader, self.context)\n return data\n \n class Meta:\n", "issue": "Partial update not working correctly because of custom validation\nThe validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.\r\nIf you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError. \r\n\r\nThis should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist because it will then either not change (for partial update request) or throw a `field is required` validation error (for every other type of request).\r\n\r\nI will fix this and add a test for it.\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import (\n ETDRSGridAnnotation,\n MeasurementAnnotation,\n BooleanClassificationAnnotation,\n PolygonAnnotationSet,\n SinglePolygonAnnotation,\n LandmarkAnnotationSet,\n SingleLandmarkAnnotation,\n)\nfrom .validators import validate_grader_is_current_retina_user\n\n\nclass AbstractAnnotationSerializer(serializers.ModelSerializer):\n def validate_grader(self, value):\n \"\"\"\n Validate that grader is the user creating the object for retina_graders group\n \"\"\"\n validate_grader_is_current_retina_user(value, self.context)\n return value\n\n class Meta:\n abstract = True\n\n\nclass AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n \"\"\"\n validate_grader_is_current_retina_user(\n data[\"annotation_set\"].grader, self.context\n )\n return data\n\n class Meta:\n abstract = True\n\n\nclass ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = ETDRSGridAnnotation\n fields = (\"grader\", \"created\", \"image\", \"fovea\", \"optic_disk\")\n\n\nclass MeasurementAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = MeasurementAnnotation\n fields = (\"image\", \"grader\", \"created\", \"start_voxel\", \"end_voxel\")\n\n\nclass BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = BooleanClassificationAnnotation\n fields = (\"image\", \"grader\", \"created\", \"name\", \"value\")\n\n\nclass SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):\n annotation_set = serializers.PrimaryKeyRelatedField(\n queryset=PolygonAnnotationSet.objects.all()\n )\n\n class Meta:\n model = SinglePolygonAnnotation\n fields = 
(\"id\", \"value\", \"annotation_set\")\n\n\nclass PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):\n singlepolygonannotation_set = SinglePolygonAnnotationSerializer(\n many=True, read_only=True\n )\n\n class Meta:\n model = PolygonAnnotationSet\n fields = (\n \"id\",\n \"image\",\n \"grader\",\n \"created\",\n \"name\",\n \"singlepolygonannotation_set\",\n )\n\n\nclass LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = LandmarkAnnotationSet\n fields = (\"grader\", \"created\")\n\n\nclass SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):\n class Meta:\n model = SingleLandmarkAnnotation\n fields = (\"image\", \"annotation_set\", \"landmarks\")\n", "path": "app/grandchallenge/annotations/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom .models import (\n ETDRSGridAnnotation,\n MeasurementAnnotation,\n BooleanClassificationAnnotation,\n PolygonAnnotationSet,\n SinglePolygonAnnotation,\n LandmarkAnnotationSet,\n SingleLandmarkAnnotation,\n)\nfrom .validators import validate_grader_is_current_retina_user\n\n\nclass AbstractAnnotationSerializer(serializers.ModelSerializer):\n def validate_grader(self, value):\n \"\"\"\n Validate that grader is the user creating the object for retina_graders group\n \"\"\"\n validate_grader_is_current_retina_user(value, self.context)\n return value\n\n class Meta:\n abstract = True\n\n\nclass AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n Validate that the user that is creating this object equals the\n annotation_set.grader for retina_graders\n \"\"\"\n if data.get(\"annotation_set\") is None:\n return data\n\n grader = data[\"annotation_set\"].grader\n validate_grader_is_current_retina_user(grader, self.context)\n return data\n\n class Meta:\n abstract = True\n\n\nclass ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = ETDRSGridAnnotation\n fields = (\"grader\", \"created\", \"image\", \"fovea\", \"optic_disk\")\n\n\nclass MeasurementAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = MeasurementAnnotation\n fields = (\"image\", \"grader\", \"created\", \"start_voxel\", \"end_voxel\")\n\n\nclass BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = BooleanClassificationAnnotation\n fields = (\"image\", \"grader\", \"created\", \"name\", \"value\")\n\n\nclass SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):\n annotation_set = serializers.PrimaryKeyRelatedField(\n queryset=PolygonAnnotationSet.objects.all()\n )\n\n class Meta:\n model = SinglePolygonAnnotation\n fields = (\"id\", \"value\", \"annotation_set\")\n\n\nclass PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):\n singlepolygonannotation_set = SinglePolygonAnnotationSerializer(\n many=True, read_only=True\n )\n\n class Meta:\n model = PolygonAnnotationSet\n fields = (\n \"id\",\n \"image\",\n \"grader\",\n \"created\",\n \"name\",\n \"singlepolygonannotation_set\",\n )\n\n\nclass LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = LandmarkAnnotationSet\n fields = (\"grader\", \"created\")\n\n\nclass SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):\n class Meta:\n model = SingleLandmarkAnnotation\n fields = (\"image\", \"annotation_set\", \"landmarks\")\n", "path": "app/grandchallenge/annotations/serializers.py"}]} | 1,154 | 214 |
gh_patches_debug_21085 | rasdani/github-patches | git_diff | google__flax-541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PPO example does not terminate properly
### Configuration
Running the PPO example for a small number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config: Python 3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55.
Command run:
`python ppo_main.py --config.game=Qbert --config.total_frames=4000`
### Problem you have encountered:
The program does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main`, but there is an open thread and the program can't exit (even after adding `raise SystemExit`).
### Extra comments
Added an extra line in `main`, `tf.config.experimental.set_visible_devices([], 'GPU')`, so the program runs properly with `tensorflow-gpu`; this is common in other `flax/examples`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/ppo/ppo_main.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from absl import flags
17 from absl import app
18 import jax
19 import jax.random
20 from ml_collections import config_flags
21
22 import ppo_lib
23 import models
24 import env_utils
25
26 FLAGS = flags.FLAGS
27
28 flags.DEFINE_string(
29 'logdir', default='/tmp/ppo_training',
30 help=('Directory to save checkpoints and logging info.'))
31
32 config_flags.DEFINE_config_file(
33 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),
34 'File path to the default configuration file.')
35
36 def main(argv):
37 config = FLAGS.config
38 game = config.game + 'NoFrameskip-v4'
39 num_actions = env_utils.get_num_actions(game)
40 print(f'Playing {game} with {num_actions} actions')
41 key = jax.random.PRNGKey(0)
42 key, subkey = jax.random.split(key)
43 model = models.create_model(subkey, num_outputs=num_actions)
44 optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)
45 del model
46 optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)
47
48 if __name__ == '__main__':
49 app.run(main)
50
```
Path: `examples/ppo/agent.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Agent utilities, incl. choosing the move and running in separate process."""
16
17 import multiprocessing
18 import collections
19 import jax
20 import numpy as onp
21
22 import env_utils
23
24 @jax.jit
25 def policy_action(model, state):
26 """Forward pass of the network."""
27 out = model(state)
28 return out
29
30
31 ExpTuple = collections.namedtuple(
32 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])
33
34
35 class RemoteSimulator:
36 """Wrap functionality for an agent emulating Atari in a separate process.
37
38 An object of this class is created for every agent.
39 """
40
41 def __init__(self, game: str):
42 """Start the remote process and create Pipe() to communicate with it."""
43 parent_conn, child_conn = multiprocessing.Pipe()
44 self.proc = multiprocessing.Process(
45 target=rcv_action_send_exp, args=(child_conn, game))
46 self.conn = parent_conn
47 self.proc.start()
48
49
50 def rcv_action_send_exp(conn, game: str):
51 """Run the remote agents.
52
53 Receive action from the main learner, perform one step of simulation and
54 send back collected experience.
55 """
56 env = env_utils.create_env(game, clip_rewards=True)
57 while True:
58 obs = env.reset()
59 done = False
60 # Observations fetched from Atari env need additional batch dimension.
61 state = obs[None, ...]
62 while not done:
63 conn.send(state)
64 action = conn.recv()
65 obs, reward, done, _ = env.step(action)
66 next_state = obs[None, ...] if not done else None
67 experience = (state, action, reward, done)
68 conn.send(experience)
69 if done:
70 break
71 state = next_state
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py
--- a/examples/ppo/agent.py
+++ b/examples/ppo/agent.py
@@ -43,6 +43,7 @@
parent_conn, child_conn = multiprocessing.Pipe()
self.proc = multiprocessing.Process(
target=rcv_action_send_exp, args=(child_conn, game))
+ self.proc.daemon = True
self.conn = parent_conn
self.proc.start()
diff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py
--- a/examples/ppo/ppo_main.py
+++ b/examples/ppo/ppo_main.py
@@ -19,6 +19,8 @@
import jax.random
from ml_collections import config_flags
+import tensorflow as tf
+
import ppo_lib
import models
import env_utils
@@ -34,6 +36,9 @@
'File path to the default configuration file.')
def main(argv):
+ # Make sure tf does not allocate gpu memory.
+ tf.config.experimental.set_visible_devices([], 'GPU')
+
config = FLAGS.config
game = config.game + 'NoFrameskip-v4'
num_actions = env_utils.get_num_actions(game)
| {"golden_diff": "diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py\n--- a/examples/ppo/agent.py\n+++ b/examples/ppo/agent.py\n@@ -43,6 +43,7 @@\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n+ self.proc.daemon = True\n self.conn = parent_conn\n self.proc.start()\n \ndiff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py\n--- a/examples/ppo/ppo_main.py\n+++ b/examples/ppo/ppo_main.py\n@@ -19,6 +19,8 @@\n import jax.random\n from ml_collections import config_flags\n \n+import tensorflow as tf\n+\n import ppo_lib\n import models\n import env_utils\n@@ -34,6 +36,9 @@\n 'File path to the default configuration file.')\n \n def main(argv):\n+ # Make sure tf does not allocate gpu memory.\n+ tf.config.experimental.set_visible_devices([], 'GPU')\n+\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n", "issue": "PPO example does not terminate properly\n### Configuration\r\n\r\nRunning the PPO example for a short number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config python3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55 .\r\n\r\nCommand run:\r\n`python ppo_main.py --config.game=Qbert --config.total_frames=4000`\r\n\r\n### Problem you have encountered:\r\n\r\nProgram does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main` but there is an open thread and program can't exit (even after adding `raise SystemExit`).\r\n\r\n### Extra comments\r\n\r\nAdded extra line in `main` ` tf.config.experimental.set_visible_devices([],'GPU')` in order for the program to run properly with `tensorflow-gpu`, this is common in other `flax/examples`. \n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom absl import flags\nfrom absl import app\nimport jax\nimport jax.random\nfrom ml_collections import config_flags\n\nimport ppo_lib\nimport models\nimport env_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'logdir', default='/tmp/ppo_training',\n help=('Directory to save checkpoints and logging info.'))\n\nconfig_flags.DEFINE_config_file(\n 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),\n 'File path to the default configuration file.')\n\ndef main(argv):\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n print(f'Playing {game} with {num_actions} actions')\n key = jax.random.PRNGKey(0)\n key, subkey = jax.random.split(key)\n model = models.create_model(subkey, num_outputs=num_actions)\n optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)\n del model\n optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "examples/ppo/ppo_main.py"}, {"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 
(the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Agent utilities, incl. choosing the move and running in separate process.\"\"\"\n\nimport multiprocessing\nimport collections\nimport jax\nimport numpy as onp\n\nimport env_utils\n\[email protected]\ndef policy_action(model, state):\n \"\"\"Forward pass of the network.\"\"\"\n out = model(state)\n return out\n\n\nExpTuple = collections.namedtuple(\n 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])\n\n\nclass RemoteSimulator:\n \"\"\"Wrap functionality for an agent emulating Atari in a separate process.\n\n An object of this class is created for every agent.\n \"\"\"\n\n def __init__(self, game: str):\n \"\"\"Start the remote process and create Pipe() to communicate with it.\"\"\"\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n self.conn = parent_conn\n self.proc.start()\n\n\ndef rcv_action_send_exp(conn, game: str):\n \"\"\"Run the remote agents.\n\n Receive action from the main learner, perform one step of simulation and\n send back collected experience.\n \"\"\"\n env = env_utils.create_env(game, clip_rewards=True)\n while True:\n obs = env.reset()\n done = False\n # Observations fetched from Atari env need additional batch dimension.\n state = obs[None, ...]\n while not done:\n conn.send(state)\n action = conn.recv()\n obs, reward, done, _ = env.step(action)\n next_state = obs[None, ...] 
if not done else None\n experience = (state, action, reward, done)\n conn.send(experience)\n if done:\n break\n state = next_state\n", "path": "examples/ppo/agent.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom absl import flags\nfrom absl import app\nimport jax\nimport jax.random\nfrom ml_collections import config_flags\n\nimport tensorflow as tf\n\nimport ppo_lib\nimport models\nimport env_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'logdir', default='/tmp/ppo_training',\n help=('Directory to save checkpoints and logging info.'))\n\nconfig_flags.DEFINE_config_file(\n 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),\n 'File path to the default configuration file.')\n\ndef main(argv):\n # Make sure tf does not allocate gpu memory.\n tf.config.experimental.set_visible_devices([], 'GPU')\n\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n print(f'Playing {game} with {num_actions} actions')\n key = jax.random.PRNGKey(0)\n key, subkey = jax.random.split(key)\n model = models.create_model(subkey, num_outputs=num_actions)\n optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)\n del model\n optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "examples/ppo/ppo_main.py"}, {"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Agent utilities, incl. 
choosing the move and running in separate process.\"\"\"\n\nimport multiprocessing\nimport collections\nimport jax\nimport numpy as onp\n\nimport env_utils\n\[email protected]\ndef policy_action(model, state):\n \"\"\"Forward pass of the network.\"\"\"\n out = model(state)\n return out\n\n\nExpTuple = collections.namedtuple(\n 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])\n\n\nclass RemoteSimulator:\n \"\"\"Wrap functionality for an agent emulating Atari in a separate process.\n\n An object of this class is created for every agent.\n \"\"\"\n\n def __init__(self, game: str):\n \"\"\"Start the remote process and create Pipe() to communicate with it.\"\"\"\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n self.proc.daemon = True\n self.conn = parent_conn\n self.proc.start()\n\n\ndef rcv_action_send_exp(conn, game: str):\n \"\"\"Run the remote agents.\n\n Receive action from the main learner, perform one step of simulation and\n send back collected experience.\n \"\"\"\n env = env_utils.create_env(game, clip_rewards=True)\n while True:\n obs = env.reset()\n done = False\n # Observations fetched from Atari env need additional batch dimension.\n state = obs[None, ...]\n while not done:\n conn.send(state)\n action = conn.recv()\n obs, reward, done, _ = env.step(action)\n next_state = obs[None, ...] if not done else None\n experience = (state, action, reward, done)\n conn.send(experience)\n if done:\n break\n state = next_state\n", "path": "examples/ppo/agent.py"}]} | 1,605 | 269 |
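Condensed from the golden diff in this row, the two changes that let `ppo_main.py` terminate cleanly are daemonizing the simulator worker processes and keeping TensorFlow off the GPU. A minimal sketch (names such as `rcv_action_send_exp` refer to the example code shown above, so this is not runnable on its own):

```python
import multiprocessing

import tensorflow as tf


class RemoteSimulator:
    def __init__(self, game: str):
        parent_conn, child_conn = multiprocessing.Pipe()
        self.proc = multiprocessing.Process(
            target=rcv_action_send_exp, args=(child_conn, game))
        # Daemonic children are terminated when the main process exits,
        # so the training script no longer hangs on an open worker.
        self.proc.daemon = True
        self.conn = parent_conn
        self.proc.start()


def main(argv):
    # Make sure tf does not allocate gpu memory.
    tf.config.experimental.set_visible_devices([], 'GPU')
    ...
```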
gh_patches_debug_921 | rasdani/github-patches | git_diff | tensorflow__addons-1941 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Usage with tf.keras API
https://github.com/tensorflow/addons/blob/5f618fdb92d9737da059de2a33fa606e97505398/tensorflow_addons/losses/focal_loss.py#L52-L53
The usage example for the `tf.keras` API is incorrect. It should be replaced with:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/losses/focal_loss.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements Focal loss."""
16
17 import tensorflow as tf
18 import tensorflow.keras.backend as K
19
20 from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
21 from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
22 from typeguard import typechecked
23
24
25 @tf.keras.utils.register_keras_serializable(package="Addons")
26 class SigmoidFocalCrossEntropy(LossFunctionWrapper):
27 """Implements the focal loss function.
28
29 Focal loss was first introduced in the RetinaNet paper
30 (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
31 classification when you have highly imbalanced classes. It down-weights
32 well-classified examples and focuses on hard examples. The loss value is
33 much high for a sample which is misclassified by the classifier as compared
34 to the loss value corresponding to a well-classified example. One of the
35 best use-cases of focal loss is its usage in object detection where the
36 imbalance between the background class and other classes is extremely high.
37
38 Usage:
39
40 ```python
41 fl = tfa.losses.SigmoidFocalCrossEntropy()
42 loss = fl(
43 y_true = [[1.0], [1.0], [0.0]],
44 y_pred = [[0.97], [0.91], [0.03]])
45 print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,
46 1.9097870e-04,
47 2.0559824e-05]
48 ```
49 Usage with tf.keras API:
50
51 ```python
52 model = tf.keras.Model(inputs, outputs)
53 model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())
54 ```
55
56 Args
57 alpha: balancing factor, default value is 0.25
58 gamma: modulating factor, default value is 2.0
59
60 Returns:
61 Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
62 shape as `y_true`; otherwise, it is scalar.
63
64 Raises:
65 ValueError: If the shape of `sample_weight` is invalid or value of
66 `gamma` is less than zero
67 """
68
69 @typechecked
70 def __init__(
71 self,
72 from_logits: bool = False,
73 alpha: FloatTensorLike = 0.25,
74 gamma: FloatTensorLike = 2.0,
75 reduction: str = tf.keras.losses.Reduction.NONE,
76 name: str = "sigmoid_focal_crossentropy",
77 ):
78 super().__init__(
79 sigmoid_focal_crossentropy,
80 name=name,
81 reduction=reduction,
82 from_logits=from_logits,
83 alpha=alpha,
84 gamma=gamma,
85 )
86
87
88 @tf.keras.utils.register_keras_serializable(package="Addons")
89 @tf.function
90 def sigmoid_focal_crossentropy(
91 y_true: TensorLike,
92 y_pred: TensorLike,
93 alpha: FloatTensorLike = 0.25,
94 gamma: FloatTensorLike = 2.0,
95 from_logits: bool = False,
96 ) -> tf.Tensor:
97 """
98 Args
99 y_true: true targets tensor.
100 y_pred: predictions tensor.
101 alpha: balancing factor.
102 gamma: modulating factor.
103
104 Returns:
105 Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
106 same shape as `y_true`; otherwise, it is scalar.
107 """
108 if gamma and gamma < 0:
109 raise ValueError("Value of gamma should be greater than or equal to zero")
110
111 y_pred = tf.convert_to_tensor(y_pred)
112 y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
113
114 # Get the cross_entropy for each entry
115 ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
116
117 # If logits are provided then convert the predictions into probabilities
118 if from_logits:
119 pred_prob = tf.sigmoid(y_pred)
120 else:
121 pred_prob = y_pred
122
123 p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
124 alpha_factor = 1.0
125 modulating_factor = 1.0
126
127 if alpha:
128 alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
129 alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
130
131 if gamma:
132 gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
133 modulating_factor = tf.pow((1.0 - p_t), gamma)
134
135 # compute the final loss and return
136 return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py
--- a/tensorflow_addons/losses/focal_loss.py
+++ b/tensorflow_addons/losses/focal_loss.py
@@ -50,7 +50,7 @@
```python
model = tf.keras.Model(inputs, outputs)
- model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())
+ model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
```
Args
| {"golden_diff": "diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py\n--- a/tensorflow_addons/losses/focal_loss.py\n+++ b/tensorflow_addons/losses/focal_loss.py\n@@ -50,7 +50,7 @@\n \n ```python\n model = tf.keras.Model(inputs, outputs)\n- model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())\n+ model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n ```\n \n Args\n", "issue": "Usage with tf.keras API\nhttps://github.com/tensorflow/addons/blob/5f618fdb92d9737da059de2a33fa606e97505398/tensorflow_addons/losses/focal_loss.py#L52-L53\r\n\r\nThe usage in `tf.keras` API example is incorrect. It should be replaced with:\r\n\r\n```python\r\nmodel = tf.keras.Model(inputs, outputs)\r\nmodel.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\r\n```\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n ```python\n fl = tfa.losses.SigmoidFocalCrossEntropy()\n loss = fl(\n y_true = [[1.0], [1.0], [0.0]],\n y_pred = [[0.97], [0.91], [0.03]])\n print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,\n 1.9097870e-04,\n 2.0559824e-05]\n ```\n Usage with tf.keras API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())\n ```\n\n Args\n alpha: balancing factor, default value is 0.25\n gamma: modulating factor, default value is 2.0\n\n Returns:\n Weighted loss float `Tensor`. 
If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"\n Args\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. 
It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n ```python\n fl = tfa.losses.SigmoidFocalCrossEntropy()\n loss = fl(\n y_true = [[1.0], [1.0], [0.0]],\n y_pred = [[0.97], [0.91], [0.03]])\n print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,\n 1.9097870e-04,\n 2.0559824e-05]\n ```\n Usage with tf.keras API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n ```\n\n Args\n alpha: balancing factor, default value is 0.25\n gamma: modulating factor, default value is 2.0\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"\n Args\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}]} | 1,871 | 133 |
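The fix in this row is documentation-only: the usage snippet should reference `tfa.losses`, not `tf.keras.losses`. A self-contained usage example of the corrected call (the tiny model here is a placeholder, not taken from the addons docs):

```python
import tensorflow as tf
import tensorflow_addons as tfa

inputs = tf.keras.Input(shape=(16,))
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(inputs)
model = tf.keras.Model(inputs, outputs)

# SigmoidFocalCrossEntropy lives in the addons package, not in tf.keras.losses.
model.compile("sgd", loss=tfa.losses.SigmoidFocalCrossEntropy())
```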
gh_patches_debug_30444 | rasdani/github-patches | git_diff | dask__dask-618 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Base.to_graphviz
Add a function that returns the `graphviz` instance created from a dask graph, for the reasons below:
- When using IPython, `.visualize` outputs an unnecessary image file
- Sometimes we want to modify the graphviz instance directly
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/dot.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import re
4 from subprocess import check_call, CalledProcessError
5
6 from graphviz import Digraph
7
8 from .core import istask, get_dependencies, ishashable
9
10
11 def task_label(task):
12 """Label for a task on a dot graph.
13
14 Examples
15 --------
16 >>> from operator import add
17 >>> task_label((add, 1, 2))
18 'add'
19 >>> task_label((add, (add, 1, 2), 3))
20 'add(...)'
21 """
22 func = task[0]
23 if hasattr(func, 'funcs'):
24 if len(func.funcs) > 1:
25 return '{0}(...)'.format(funcname(func.funcs[0]))
26 else:
27 head = funcname(func.funcs[0])
28 else:
29 head = funcname(task[0])
30 if any(has_sub_tasks(i) for i in task[1:]):
31 return '{0}(...)'.format(head)
32 else:
33 return head
34
35
36 def has_sub_tasks(task):
37 """Returns True if the task has sub tasks"""
38 if istask(task):
39 return True
40 elif isinstance(task, list):
41 return any(has_sub_tasks(i) for i in task)
42 else:
43 return False
44
45
46 def funcname(func):
47 """Get the name of a function."""
48 while hasattr(func, 'func'):
49 func = func.func
50 return func.__name__
51
52
53 def name(x):
54 try:
55 return str(hash(x))
56 except TypeError:
57 return str(hash(str(x)))
58
59
60 _HASHPAT = re.compile('([0-9a-z]{32})')
61
62
63 def label(x, cache=None):
64 """
65
66 >>> label('x')
67 'x'
68
69 >>> label(('x', 1))
70 "('x', 1)"
71
72 >>> from hashlib import md5
73 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
74 >>> x
75 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
76
77 >>> label(x)
78 'x-#-hello'
79 """
80 s = str(x)
81 m = re.search(_HASHPAT, s)
82 if m is not None:
83 for h in m.groups():
84 if cache is not None:
85 n = cache.get(h, len(cache))
86 label = '#{0}'.format(n)
87 # cache will be overwritten destructively
88 cache[h] = n
89 else:
90 label = '#'
91 s = s.replace(h, label)
92 return s
93
94
95 def to_graphviz(dsk, data_attributes=None, function_attributes=None):
96 if data_attributes is None:
97 data_attributes = {}
98 if function_attributes is None:
99 function_attributes = {}
100
101 g = Digraph(graph_attr={'rankdir': 'BT'})
102
103 seen = set()
104 cache = {}
105
106 for k, v in dsk.items():
107 k_name = name(k)
108 if k_name not in seen:
109 seen.add(k_name)
110 g.node(k_name, label=label(k, cache=cache), shape='box',
111 **data_attributes.get(k, {}))
112
113 if istask(v):
114 func_name = name((k, 'function'))
115 if func_name not in seen:
116 seen.add(func_name)
117 g.node(func_name, label=task_label(v), shape='circle',
118 **function_attributes.get(k, {}))
119 g.edge(func_name, k_name)
120
121 for dep in get_dependencies(dsk, k):
122 dep_name = name(dep)
123 if dep_name not in seen:
124 seen.add(dep_name)
125 g.node(dep_name, label=label(dep, cache=cache), shape='box',
126 **data_attributes.get(dep, {}))
127 g.edge(dep_name, func_name)
128 elif ishashable(v) and v in dsk:
129 g.edge(name(v), k_name)
130 return g
131
132
133 def dot_graph(dsk, filename='mydask', **kwargs):
134 g = to_graphviz(dsk, **kwargs)
135 g.save(filename + '.dot')
136
137 try:
138 check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
139 check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
140 except CalledProcessError:
141 raise RuntimeError(
142 "Please install The `dot` utility from graphviz:\n"
143 " Debian: sudo apt-get install graphviz\n"
144 " Mac OSX: brew install graphviz\n"
145 " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
146 try:
147 from IPython.display import Image
148 return Image(filename + '.png')
149 except ImportError:
150 pass
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dask/dot.py b/dask/dot.py
--- a/dask/dot.py
+++ b/dask/dot.py
@@ -6,6 +6,7 @@
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
+from .compatibility import BytesIO
def task_label(task):
@@ -132,19 +133,35 @@
def dot_graph(dsk, filename='mydask', **kwargs):
g = to_graphviz(dsk, **kwargs)
- g.save(filename + '.dot')
- try:
- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
- except CalledProcessError:
- raise RuntimeError(
- "Please install The `dot` utility from graphviz:\n"
- " Debian: sudo apt-get install graphviz\n"
- " Mac OSX: brew install graphviz\n"
- " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
- try:
- from IPython.display import Image
- return Image(filename + '.png')
- except ImportError:
- pass
+ if filename is not None:
+ g.save(filename + '.dot')
+
+ try:
+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),
+ shell=True)
+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),
+ shell=True)
+
+ except CalledProcessError:
+ msg = ("Please install The `dot` utility from graphviz:\n"
+ " Debian: sudo apt-get install graphviz\n"
+ " Mac OSX: brew install graphviz\n"
+ " Windows: http://www.graphviz.org/Download..php")
+ raise RuntimeError(msg) # pragma: no cover
+
+ try:
+ from IPython.display import Image
+ return Image(filename + '.png')
+ except ImportError:
+ pass
+
+ else:
+ try:
+ from IPython.display import Image
+ s = BytesIO()
+ s.write(g.pipe(format='png'))
+ s.seek(0)
+ return Image(s.read())
+ except ImportError:
+ pass
| {"golden_diff": "diff --git a/dask/dot.py b/dask/dot.py\n--- a/dask/dot.py\n+++ b/dask/dot.py\n@@ -6,6 +6,7 @@\n from graphviz import Digraph\n \n from .core import istask, get_dependencies, ishashable\n+from .compatibility import BytesIO\n \n \n def task_label(task):\n@@ -132,19 +133,35 @@\n \n def dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n- g.save(filename + '.dot')\n \n- try:\n- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n- except CalledProcessError:\n- raise RuntimeError(\n- \"Please install The `dot` utility from graphviz:\\n\"\n- \" Debian: sudo apt-get install graphviz\\n\"\n- \" Mac OSX: brew install graphviz\\n\"\n- \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n- try:\n- from IPython.display import Image\n- return Image(filename + '.png')\n- except ImportError:\n- pass\n+ if filename is not None:\n+ g.save(filename + '.dot')\n+\n+ try:\n+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),\n+ shell=True)\n+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),\n+ shell=True)\n+\n+ except CalledProcessError:\n+ msg = (\"Please install The `dot` utility from graphviz:\\n\"\n+ \" Debian: sudo apt-get install graphviz\\n\"\n+ \" Mac OSX: brew install graphviz\\n\"\n+ \" Windows: http://www.graphviz.org/Download..php\")\n+ raise RuntimeError(msg) # pragma: no cover\n+\n+ try:\n+ from IPython.display import Image\n+ return Image(filename + '.png')\n+ except ImportError:\n+ pass\n+\n+ else:\n+ try:\n+ from IPython.display import Image\n+ s = BytesIO()\n+ s.write(g.pipe(format='png'))\n+ s.seek(0)\n+ return Image(s.read())\n+ except ImportError:\n+ pass\n", "issue": "Add Base.to_graphviz\nAdd function to return `graphviz` instance created from dask graph for below reasons:\n- When using IPython, `.visualize` outputs unnecessary image file\n- Sometimes we want to modify graphviz instance directly\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom subprocess import check_call, CalledProcessError\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 
'x-#-hello'\n \"\"\"\n s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n g.save(filename + '.dot')\n\n try:\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n except CalledProcessError:\n raise RuntimeError(\n \"Please install The `dot` utility from graphviz:\\n\"\n \" Debian: sudo apt-get install graphviz\\n\"\n \" Mac OSX: brew install graphviz\\n\"\n \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n", "path": "dask/dot.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom subprocess import check_call, CalledProcessError\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\nfrom .compatibility import BytesIO\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n 
s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n\n if filename is not None:\n g.save(filename + '.dot')\n\n try:\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),\n shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),\n shell=True)\n\n except CalledProcessError:\n msg = (\"Please install The `dot` utility from graphviz:\\n\"\n \" Debian: sudo apt-get install graphviz\\n\"\n \" Mac OSX: brew install graphviz\\n\"\n \" Windows: http://www.graphviz.org/Download..php\")\n raise RuntimeError(msg) # pragma: no cover\n\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n\n else:\n try:\n from IPython.display import Image\n s = BytesIO()\n s.write(g.pipe(format='png'))\n s.seek(0)\n return Image(s.read())\n except ImportError:\n pass\n", "path": "dask/dot.py"}]} | 1,714 | 551 |
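The essence of the change recorded here is that `dot_graph` gains a file-less path: with `filename=None` it renders the graph in memory and returns an inline IPython image instead of writing `mydask.dot/.pdf/.png`. A minimal sketch of that branch, assuming the `to_graphviz` helper shown above (the repository routes `BytesIO` through its own compatibility module):

```python
from io import BytesIO


def dot_graph(dsk, filename='mydask', **kwargs):
    g = to_graphviz(dsk, **kwargs)
    if filename is not None:
        g.save(filename + '.dot')
        # ... existing path: shell out to `dot` and return Image(filename + '.png')
        return
    # New branch: pipe the rendered PNG straight to IPython, no files on disk.
    from IPython.display import Image
    s = BytesIO()
    s.write(g.pipe(format='png'))
    s.seek(0)
    return Image(s.read())
```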
gh_patches_debug_18332 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[INF] Make requirements.txt smaller
Follow-up from #257
The idea is to have feature-specific requirements files; for example, biology specifically requires biopython,
so we can install the package per feature as needed, e.g. the biology extra via `pip install "pyjanitor[biology]"`.
The example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 setup(
42 name="pyjanitor",
43 version="0.18.2",
44 description="Tools for cleaning pandas DataFrames",
45 author="Eric J. Ma",
46 author_email="[email protected]",
47 url="https://github.com/ericmjl/pyjanitor",
48 packages=["janitor"],
49 install_requires=requirements(),
50 python_requires=">=3.6",
51 long_description=generate_long_description(),
52 long_description_content_type="text/x-rst",
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,6 +38,12 @@
return long_description
+extra_spark = ["pyspark"]
+extra_biology = ["biopython"]
+extra_chemistry = ["rdkit"]
+extra_engineering = ["unyt"]
+extra_all = extra_biology + extra_engineering + extra_spark
+
setup(
name="pyjanitor",
version="0.18.2",
@@ -47,6 +53,14 @@
url="https://github.com/ericmjl/pyjanitor",
packages=["janitor"],
install_requires=requirements(),
+ extras_require={
+ "all": extra_all,
+ "biology": extra_biology,
+ # "chemistry": extra_chemistry, should be inserted once rdkit
+ # fixes https://github.com/rdkit/rdkit/issues/1812
+ "engineering": extra_engineering,
+ "spark": extra_spark,
+ },
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,6 +38,12 @@\n return long_description\n \n \n+extra_spark = [\"pyspark\"]\n+extra_biology = [\"biopython\"]\n+extra_chemistry = [\"rdkit\"]\n+extra_engineering = [\"unyt\"]\n+extra_all = extra_biology + extra_engineering + extra_spark\n+\n setup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n@@ -47,6 +53,14 @@\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n+ extras_require={\n+ \"all\": extra_all,\n+ \"biology\": extra_biology,\n+ # \"chemistry\": extra_chemistry, should be inserted once rdkit\n+ # fixes https://github.com/rdkit/rdkit/issues/1812\n+ \"engineering\": extra_engineering,\n+ \"spark\": extra_spark,\n+ },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]} | 965 | 263 |
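Editorial note (not part of the dataset record above): once the extras shown in the golden diff are declared, a quick way to check that `pip install "pyjanitor[biology]"` actually pulled in its optional dependency is to try importing it. A minimal sketch, assuming biopython (whose import name is `Bio`) is the extra of interest:

```python
# Sanity-check sketch: is the optional "biology" dependency importable?
import importlib

try:
    importlib.import_module("Bio")  # biopython installs the Bio package
    print("biology extra is installed")
except ImportError:
    print('biology extra is missing; run: pip install "pyjanitor[biology]"')
```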
gh_patches_debug_30083 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1188 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The type inference algorithm should use `TEXT` rather than `VARCHAR`
## Reproduce
1. "New Table" > "Import Data" > "Copy and Paste Text"
1. Paste the following data and proceed to create and view the table.
```txt
first_name
Marge
Homer
Lisa
Bart
Maggie
```
1. From the `columns` API, expect the response for the `first_name` column to have `"type": "TEXT"`
1. Observe instead that the column is `VARCHAR` without a length set.
## Rationale
- I spoke with @kgodey about the Mathesar Text type today and she said that Mathesar should only be configuring either `TEXT` columns or `VARCHAR` columns with a length specified. She may be able to elaborate on the thinking that went into this decision.
## Additional context
- In #1118, we are doing some work to bring the front end into alignment with the above expectations when the user manually configures the DB settings for the Text type.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/columns/operations/infer_types.py`
Content:
```
1 import logging
2
3 from sqlalchemy import VARCHAR, TEXT, Text
4 from sqlalchemy.exc import DatabaseError
5
6 from db.columns.exceptions import DagCycleError
7 from db.columns.operations.alter import alter_column_type
8 from db.tables.operations.select import get_oid_from_table, reflect_table
9 from db.types.operations.cast import get_supported_alter_column_types
10 from db.types import base
11
12
13 logger = logging.getLogger(__name__)
14
15 MAX_INFERENCE_DAG_DEPTH = 100
16
17 TYPE_INFERENCE_DAG = {
18 base.PostgresType.BOOLEAN.value: [],
19 base.MathesarCustomType.EMAIL.value: [],
20 base.PostgresType.INTERVAL.value: [],
21 base.PostgresType.NUMERIC.value: [
22 base.PostgresType.BOOLEAN.value,
23 ],
24 base.STRING: [
25 base.PostgresType.BOOLEAN.value,
26 base.PostgresType.DATE.value,
27 base.PostgresType.NUMERIC.value,
28 base.MathesarCustomType.MATHESAR_MONEY.value,
29 base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,
30 base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,
31 # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense
32 # without additional date information. See postgres documentation for further
33 # details: https://www.postgresql.org/docs/13/datatype-datetime.html
34 base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,
35 base.PostgresType.INTERVAL.value,
36 base.MathesarCustomType.EMAIL.value,
37 base.MathesarCustomType.URI.value,
38 ],
39 }
40
41
42 def _get_reverse_type_map(engine):
43 supported_types = get_supported_alter_column_types(engine)
44 reverse_type_map = {v: k for k, v in supported_types.items()}
45 reverse_type_map.update(
46 {
47 Text: base.STRING,
48 TEXT: base.STRING,
49 VARCHAR: base.STRING,
50 }
51 )
52 return reverse_type_map
53
54
55 def infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):
56 if depth > MAX_INFERENCE_DAG_DEPTH:
57 raise DagCycleError("The type_inference_dag likely has a cycle")
58 reverse_type_map = _get_reverse_type_map(engine)
59
60 table = reflect_table(table_name, schema, engine)
61 column_type = table.columns[column_name].type.__class__
62 column_type_str = reverse_type_map.get(column_type)
63
64 logger.debug(f"column_type_str: {column_type_str}")
65 table_oid = get_oid_from_table(table_name, schema, engine)
66 for type_str in type_inference_dag.get(column_type_str, []):
67 try:
68 with engine.begin() as conn:
69 alter_column_type(table_oid, column_name, engine, conn, type_str)
70 logger.info(f"Column {column_name} altered to type {type_str}")
71 column_type = infer_column_type(
72 schema,
73 table_name,
74 column_name,
75 engine,
76 depth=depth + 1,
77 type_inference_dag=type_inference_dag,
78 )
79 break
80 # It's expected we catch this error when the test to see whether
81 # a type is appropriate for a column fails.
82 except DatabaseError:
83 logger.info(
84 f"Cannot alter column {column_name} to type {type_str}"
85 )
86 return column_type
87
```
Path: `db/tables/operations/create.py`
Content:
```
1 from sqlalchemy import Column, String, Table, MetaData
2 from sqlalchemy.ext import compiler
3 from sqlalchemy.schema import DDLElement
4
5 from db.columns.utils import init_mathesar_table_column_list_with_defaults
6 from db.schemas.operations.create import create_schema
7
8
9 def create_mathesar_table(name, schema, columns, engine, metadata=None):
10 """
11 This method creates a Postgres table in the specified schema using the
12 given name and column list. It adds internal mathesar columns to the
13 table.
14 """
15 columns = init_mathesar_table_column_list_with_defaults(columns)
16 create_schema(schema, engine)
17 # We need this so that we can create multiple mathesar tables in the
18 # same MetaData, enabling them to reference each other in the
19 # SQLAlchemy context (e.g., for creating a ForeignKey relationship)
20 if metadata is None:
21 metadata = MetaData(bind=engine, schema=schema)
22 table = Table(
23 name,
24 metadata,
25 *columns,
26 schema=schema
27 )
28 table.create(engine)
29 return table
30
31
32 def create_string_column_table(name, schema, column_names, engine):
33 """
34 This method creates a Postgres table in the specified schema, with all
35 columns being String type.
36 """
37 columns_ = [Column(name=column_name, type_=String) for column_name in column_names]
38 table = create_mathesar_table(name, schema, columns_, engine)
39 return table
40
41
42 class CreateTableAs(DDLElement):
43 def __init__(self, name, selectable):
44 self.name = name
45 self.selectable = selectable
46
47
48 @compiler.compiles(CreateTableAs)
49 def compile_create_table_as(element, compiler, **_):
50 return "CREATE TABLE %s AS (%s)" % (
51 element.name,
52 compiler.sql_compiler.process(element.selectable, literal_binds=True),
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/columns/operations/infer_types.py b/db/columns/operations/infer_types.py
--- a/db/columns/operations/infer_types.py
+++ b/db/columns/operations/infer_types.py
@@ -21,7 +21,7 @@
base.PostgresType.NUMERIC.value: [
base.PostgresType.BOOLEAN.value,
],
- base.STRING: [
+ base.PostgresType.TEXT.value: [
base.PostgresType.BOOLEAN.value,
base.PostgresType.DATE.value,
base.PostgresType.NUMERIC.value,
@@ -44,9 +44,9 @@
reverse_type_map = {v: k for k, v in supported_types.items()}
reverse_type_map.update(
{
- Text: base.STRING,
- TEXT: base.STRING,
- VARCHAR: base.STRING,
+ Text: base.PostgresType.TEXT.value,
+ TEXT: base.PostgresType.TEXT.value,
+ VARCHAR: base.PostgresType.TEXT.value,
}
)
return reverse_type_map
diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py
--- a/db/tables/operations/create.py
+++ b/db/tables/operations/create.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, String, Table, MetaData
+from sqlalchemy import Column, TEXT, Table, MetaData
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
@@ -34,7 +34,7 @@
This method creates a Postgres table in the specified schema, with all
columns being String type.
"""
- columns_ = [Column(name=column_name, type_=String) for column_name in column_names]
+ columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]
table = create_mathesar_table(name, schema, columns_, engine)
return table
| {"golden_diff": "diff --git a/db/columns/operations/infer_types.py b/db/columns/operations/infer_types.py\n--- a/db/columns/operations/infer_types.py\n+++ b/db/columns/operations/infer_types.py\n@@ -21,7 +21,7 @@\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n- base.STRING: [\n+ base.PostgresType.TEXT.value: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n@@ -44,9 +44,9 @@\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n- Text: base.STRING,\n- TEXT: base.STRING,\n- VARCHAR: base.STRING,\n+ Text: base.PostgresType.TEXT.value,\n+ TEXT: base.PostgresType.TEXT.value,\n+ VARCHAR: base.PostgresType.TEXT.value,\n }\n )\n return reverse_type_map\ndiff --git a/db/tables/operations/create.py b/db/tables/operations/create.py\n--- a/db/tables/operations/create.py\n+++ b/db/tables/operations/create.py\n@@ -1,4 +1,4 @@\n-from sqlalchemy import Column, String, Table, MetaData\n+from sqlalchemy import Column, TEXT, Table, MetaData\n from sqlalchemy.ext import compiler\n from sqlalchemy.schema import DDLElement\n \n@@ -34,7 +34,7 @@\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n- columns_ = [Column(name=column_name, type_=String) for column_name in column_names]\n+ columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n", "issue": "The type inference algorithm should use `TEXT` rather than `VARCHAR`\n## Reproduce\r\n\r\n1. \"New Table\" > \"Import Data\" > \"Copy and Paste Text\"\r\n\r\n1. Paste the following data and proceed to create and view the table.\r\n\r\n ```txt\r\n first_name\r\n Marge\r\n Homer\r\n Lisa\r\n Bart\r\n Maggie\r\n ```\r\n\r\n1. From the `columns` API, expect the response for the `first_name` column to have `\"type\": \"TEXT\"`\r\n\r\n1. Observe instead that the column is `VARCHAR` without a length set.\r\n\r\n## Rationale\r\n\r\n- I spoke with @kgodey about the Mathesar Text type today and she say that Mathesar should only be configuring either: `TEXT` columns or `VARCHAR` columns with a length specified. 
She may be able to elaborate on the thinking that went into this decision.\r\n\r\n\r\n## Additional context\r\n\r\n- In #1118, we are doing some work to bring the front end into alignment with the above expectations when the user manually configures the DB settings for the Text type.\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom sqlalchemy import VARCHAR, TEXT, Text\nfrom sqlalchemy.exc import DatabaseError\n\nfrom db.columns.exceptions import DagCycleError\nfrom db.columns.operations.alter import alter_column_type\nfrom db.tables.operations.select import get_oid_from_table, reflect_table\nfrom db.types.operations.cast import get_supported_alter_column_types\nfrom db.types import base\n\n\nlogger = logging.getLogger(__name__)\n\nMAX_INFERENCE_DAG_DEPTH = 100\n\nTYPE_INFERENCE_DAG = {\n base.PostgresType.BOOLEAN.value: [],\n base.MathesarCustomType.EMAIL.value: [],\n base.PostgresType.INTERVAL.value: [],\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n base.STRING: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n base.MathesarCustomType.MATHESAR_MONEY.value,\n base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,\n base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,\n # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense\n # without additional date information. See postgres documentation for further\n # details: https://www.postgresql.org/docs/13/datatype-datetime.html\n base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,\n base.PostgresType.INTERVAL.value,\n base.MathesarCustomType.EMAIL.value,\n base.MathesarCustomType.URI.value,\n ],\n}\n\n\ndef _get_reverse_type_map(engine):\n supported_types = get_supported_alter_column_types(engine)\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n Text: base.STRING,\n TEXT: base.STRING,\n VARCHAR: base.STRING,\n }\n )\n return reverse_type_map\n\n\ndef infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):\n if depth > MAX_INFERENCE_DAG_DEPTH:\n raise DagCycleError(\"The type_inference_dag likely has a cycle\")\n reverse_type_map = _get_reverse_type_map(engine)\n\n table = reflect_table(table_name, schema, engine)\n column_type = table.columns[column_name].type.__class__\n column_type_str = reverse_type_map.get(column_type)\n\n logger.debug(f\"column_type_str: {column_type_str}\")\n table_oid = get_oid_from_table(table_name, schema, engine)\n for type_str in type_inference_dag.get(column_type_str, []):\n try:\n with engine.begin() as conn:\n alter_column_type(table_oid, column_name, engine, conn, type_str)\n logger.info(f\"Column {column_name} altered to type {type_str}\")\n column_type = infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n depth=depth + 1,\n type_inference_dag=type_inference_dag,\n )\n break\n # It's expected we catch this error when the test to see whether\n # a type is appropriate for a column fails.\n except DatabaseError:\n logger.info(\n f\"Cannot alter column {column_name} to type {type_str}\"\n )\n return column_type\n", "path": "db/columns/operations/infer_types.py"}, {"content": "from sqlalchemy import Column, String, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, 
columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=String) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}], "after_files": [{"content": "import logging\n\nfrom sqlalchemy import VARCHAR, TEXT, Text\nfrom sqlalchemy.exc import DatabaseError\n\nfrom db.columns.exceptions import DagCycleError\nfrom db.columns.operations.alter import alter_column_type\nfrom db.tables.operations.select import get_oid_from_table, reflect_table\nfrom db.types.operations.cast import get_supported_alter_column_types\nfrom db.types import base\n\n\nlogger = logging.getLogger(__name__)\n\nMAX_INFERENCE_DAG_DEPTH = 100\n\nTYPE_INFERENCE_DAG = {\n base.PostgresType.BOOLEAN.value: [],\n base.MathesarCustomType.EMAIL.value: [],\n base.PostgresType.INTERVAL.value: [],\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n base.PostgresType.TEXT.value: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n base.MathesarCustomType.MATHESAR_MONEY.value,\n base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,\n base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,\n # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense\n # without additional date information. 
See postgres documentation for further\n # details: https://www.postgresql.org/docs/13/datatype-datetime.html\n base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,\n base.PostgresType.INTERVAL.value,\n base.MathesarCustomType.EMAIL.value,\n base.MathesarCustomType.URI.value,\n ],\n}\n\n\ndef _get_reverse_type_map(engine):\n supported_types = get_supported_alter_column_types(engine)\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n Text: base.PostgresType.TEXT.value,\n TEXT: base.PostgresType.TEXT.value,\n VARCHAR: base.PostgresType.TEXT.value,\n }\n )\n return reverse_type_map\n\n\ndef infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):\n if depth > MAX_INFERENCE_DAG_DEPTH:\n raise DagCycleError(\"The type_inference_dag likely has a cycle\")\n reverse_type_map = _get_reverse_type_map(engine)\n\n table = reflect_table(table_name, schema, engine)\n column_type = table.columns[column_name].type.__class__\n column_type_str = reverse_type_map.get(column_type)\n\n logger.debug(f\"column_type_str: {column_type_str}\")\n table_oid = get_oid_from_table(table_name, schema, engine)\n for type_str in type_inference_dag.get(column_type_str, []):\n try:\n with engine.begin() as conn:\n alter_column_type(table_oid, column_name, engine, conn, type_str)\n logger.info(f\"Column {column_name} altered to type {type_str}\")\n column_type = infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n depth=depth + 1,\n type_inference_dag=type_inference_dag,\n )\n break\n # It's expected we catch this error when the test to see whether\n # a type is appropriate for a column fails.\n except DatabaseError:\n logger.info(\n f\"Cannot alter column {column_name} to type {type_str}\"\n )\n return column_type\n", "path": "db/columns/operations/infer_types.py"}, {"content": "from sqlalchemy import Column, TEXT, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. 
It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}]} | 1,874 | 415 |
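Editorial note (not part of the dataset record above): the core of the golden diff is that every string-ish SQLAlchemy type must map back to the same canonical TEXT key, so inference starts from `TEXT` rather than `VARCHAR`. A minimal sketch, assuming SQLAlchemy is installed and using the literal string `"text"` as a stand-in for `base.PostgresType.TEXT.value`:

```python
# Stand-alone illustration of the reverse type map built in _get_reverse_type_map().
from sqlalchemy import TEXT, VARCHAR, Text

reverse_type_map = {Text: "text", TEXT: "text", VARCHAR: "text"}

for sa_type in (Text, TEXT, VARCHAR):
    # Reflected VARCHAR columns now resolve to the same DAG entry as TEXT ones.
    print(sa_type.__name__, "->", reverse_type_map[sa_type])
```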
gh_patches_debug_36545 | rasdani/github-patches | git_diff | translate__pootle-6680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Try simpler language code as fallback before settings.LANGUAGE_CODE
In https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101 a language code such as `it-IT` is tried and the lookup eventually falls back to `settings.LANGUAGE_CODE`, but it makes sense to first try `it` (the simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/i18n/override.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 """Overrides and support functions for arbitrary locale support."""
10
11 import os
12
13 from translate.lang import data
14
15 from django.utils import translation
16 from django.utils.translation import LANGUAGE_SESSION_KEY, trans_real
17
18 from pootle.i18n import gettext
19
20
21 def find_languages(locale_path):
22 """Generate supported languages list from the :param:`locale_path`
23 directory.
24 """
25 dirs = os.listdir(locale_path)
26 langs = []
27 for lang in dirs:
28 if (data.langcode_re.match(lang) and
29 os.path.isdir(os.path.join(locale_path, lang))):
30 langs.append((trans_real.to_language(lang),
31 data.languages.get(lang, (lang,))[0]))
32 return langs
33
34
35 def supported_langs():
36 """Returns a list of supported locales."""
37 from django.conf import settings
38 return settings.LANGUAGES
39
40
41 def get_lang_from_session(request, supported):
42 if hasattr(request, 'session'):
43 lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
44 if lang_code and lang_code in supported:
45 return lang_code
46
47 return None
48
49
50 def get_lang_from_cookie(request, supported):
51 """See if the user's browser sent a cookie with a preferred language."""
52 from django.conf import settings
53 lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
54
55 if lang_code and lang_code in supported:
56 return lang_code
57
58 return None
59
60
61 def get_lang_from_http_header(request, supported):
62 """If the user's browser sends a list of preferred languages in the
63 HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through
64 the list, and for each entry, we check whether we have a matching
65 pootle translation project. If so, we return it.
66
67 If nothing is found, return None.
68 """
69 accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
70 for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
71 if accept_lang == '*':
72 return None
73
74 normalized = data.normalize_code(data.simplify_to_common(accept_lang))
75 if normalized in ['en-us', 'en']:
76 return None
77 if normalized in supported:
78 return normalized
79
80 # FIXME: horribly slow way of dealing with languages with @ in them
81 for lang in supported.keys():
82 if normalized == data.normalize_code(lang):
83 return lang
84 return None
85
86
87 def get_language_from_request(request, check_path=False):
88 """Try to get the user's preferred language by first checking the
89 cookie and then by checking the HTTP language headers.
90
91 If all fails, try fall back to default language.
92 """
93 supported = dict(supported_langs())
94 for lang_getter in (get_lang_from_session,
95 get_lang_from_cookie,
96 get_lang_from_http_header):
97 lang = lang_getter(request, supported)
98 if lang is not None:
99 return lang
100 from django.conf import settings
101 return settings.LANGUAGE_CODE
102
103
104 def get_language_bidi():
105 """Override for Django's get_language_bidi that's aware of more RTL
106 languages.
107 """
108 return gettext.language_dir(translation.get_language()) == 'rtl'
109
110
111 def hijack_translation():
112 """Sabotage Django's fascist linguistical regime."""
113 # Override functions that check if language is known to Django
114 translation.check_for_language = lambda lang_code: True
115 trans_real.check_for_language = lambda lang_code: True
116 translation.get_language_from_request = get_language_from_request
117
118 # Override django's inadequate bidi detection
119 translation.get_language_bidi = get_language_bidi
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py
--- a/pootle/i18n/override.py
+++ b/pootle/i18n/override.py
@@ -38,24 +38,35 @@
return settings.LANGUAGES
-def get_lang_from_session(request, supported):
- if hasattr(request, 'session'):
- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
- if lang_code and lang_code in supported:
- return lang_code
+def get_language_supported(lang_code, supported):
+ normalized = data.normalize_code(data.simplify_to_common(lang_code))
+ if normalized in supported:
+ return normalized
+
+ # FIXME: horribly slow way of dealing with languages with @ in them
+ for lang in supported.keys():
+ if normalized == data.normalize_code(lang):
+ return lang
return None
+def get_lang_from_session(request, supported):
+ if not hasattr(request, 'session'):
+ return None
+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
+
+
def get_lang_from_cookie(request, supported):
"""See if the user's browser sent a cookie with a preferred language."""
from django.conf import settings
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
-
- if lang_code and lang_code in supported:
- return lang_code
-
- return None
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
def get_lang_from_http_header(request, supported):
@@ -70,17 +81,9 @@
for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
if accept_lang == '*':
return None
-
- normalized = data.normalize_code(data.simplify_to_common(accept_lang))
- if normalized in ['en-us', 'en']:
- return None
- if normalized in supported:
- return normalized
-
- # FIXME: horribly slow way of dealing with languages with @ in them
- for lang in supported.keys():
- if normalized == data.normalize_code(lang):
- return lang
+ supported_lang = get_language_supported(accept_lang, supported)
+ if supported_lang:
+ return supported_lang
return None
@@ -98,7 +101,9 @@
if lang is not None:
return lang
from django.conf import settings
- return settings.LANGUAGE_CODE
+ if settings.LANGUAGE_CODE in supported:
+ return settings.LANGUAGE_CODE
+ return 'en-us'
def get_language_bidi():
| {"golden_diff": "diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py\n--- a/pootle/i18n/override.py\n+++ b/pootle/i18n/override.py\n@@ -38,24 +38,35 @@\n return settings.LANGUAGES\n \n \n-def get_lang_from_session(request, supported):\n- if hasattr(request, 'session'):\n- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n- if lang_code and lang_code in supported:\n- return lang_code\n+def get_language_supported(lang_code, supported):\n+ normalized = data.normalize_code(data.simplify_to_common(lang_code))\n+ if normalized in supported:\n+ return normalized\n+\n+ # FIXME: horribly slow way of dealing with languages with @ in them\n+ for lang in supported.keys():\n+ if normalized == data.normalize_code(lang):\n+ return lang\n \n return None\n \n \n+def get_lang_from_session(request, supported):\n+ if not hasattr(request, 'session'):\n+ return None\n+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n+\n+\n def get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n-\n- if lang_code and lang_code in supported:\n- return lang_code\n-\n- return None\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n \n \n def get_lang_from_http_header(request, supported):\n@@ -70,17 +81,9 @@\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n-\n- normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n- if normalized in ['en-us', 'en']:\n- return None\n- if normalized in supported:\n- return normalized\n-\n- # FIXME: horribly slow way of dealing with languages with @ in them\n- for lang in supported.keys():\n- if normalized == data.normalize_code(lang):\n- return lang\n+ supported_lang = get_language_supported(accept_lang, supported)\n+ if supported_lang:\n+ return supported_lang\n return None\n \n \n@@ -98,7 +101,9 @@\n if lang is not None:\n return lang\n from django.conf import settings\n- return settings.LANGUAGE_CODE\n+ if settings.LANGUAGE_CODE in supported:\n+ return settings.LANGUAGE_CODE\n+ return 'en-us'\n \n \n def get_language_bidi():\n", "issue": "Try simpler language code as fallback before settings.LANGUAGE_CODE\nIn https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101 if the language code `it-IT` (for example) is tried and eventually falls back to `settings.LANGUAGE_CODE`, but it makes sense to first try `it` (simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Overrides and support functions for arbitrary locale support.\"\"\"\n\nimport os\n\nfrom translate.lang import data\n\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, trans_real\n\nfrom pootle.i18n import gettext\n\n\ndef find_languages(locale_path):\n \"\"\"Generate supported languages list from the :param:`locale_path`\n directory.\n \"\"\"\n dirs = os.listdir(locale_path)\n langs = []\n for lang in dirs:\n if (data.langcode_re.match(lang) and\n os.path.isdir(os.path.join(locale_path, lang))):\n langs.append((trans_real.to_language(lang),\n data.languages.get(lang, (lang,))[0]))\n return langs\n\n\ndef supported_langs():\n \"\"\"Returns a list of supported locales.\"\"\"\n from django.conf import settings\n return settings.LANGUAGES\n\n\ndef get_lang_from_session(request, supported):\n if hasattr(request, 'session'):\n lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_http_header(request, supported):\n \"\"\"If the user's browser sends a list of preferred languages in the\n HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through\n the list, and for each entry, we check whether we have a matching\n pootle translation project. If so, we return it.\n\n If nothing is found, return None.\n \"\"\"\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n\n normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n if normalized in ['en-us', 'en']:\n return None\n if normalized in supported:\n return normalized\n\n # FIXME: horribly slow way of dealing with languages with @ in them\n for lang in supported.keys():\n if normalized == data.normalize_code(lang):\n return lang\n return None\n\n\ndef get_language_from_request(request, check_path=False):\n \"\"\"Try to get the user's preferred language by first checking the\n cookie and then by checking the HTTP language headers.\n\n If all fails, try fall back to default language.\n \"\"\"\n supported = dict(supported_langs())\n for lang_getter in (get_lang_from_session,\n get_lang_from_cookie,\n get_lang_from_http_header):\n lang = lang_getter(request, supported)\n if lang is not None:\n return lang\n from django.conf import settings\n return settings.LANGUAGE_CODE\n\n\ndef get_language_bidi():\n \"\"\"Override for Django's get_language_bidi that's aware of more RTL\n languages.\n \"\"\"\n return gettext.language_dir(translation.get_language()) == 'rtl'\n\n\ndef hijack_translation():\n \"\"\"Sabotage Django's fascist linguistical regime.\"\"\"\n # Override functions that check if language is known to Django\n translation.check_for_language = lambda lang_code: True\n trans_real.check_for_language = lambda lang_code: True\n translation.get_language_from_request = get_language_from_request\n\n # Override django's inadequate bidi detection\n translation.get_language_bidi = get_language_bidi\n", "path": "pootle/i18n/override.py"}], 
"after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Overrides and support functions for arbitrary locale support.\"\"\"\n\nimport os\n\nfrom translate.lang import data\n\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, trans_real\n\nfrom pootle.i18n import gettext\n\n\ndef find_languages(locale_path):\n \"\"\"Generate supported languages list from the :param:`locale_path`\n directory.\n \"\"\"\n dirs = os.listdir(locale_path)\n langs = []\n for lang in dirs:\n if (data.langcode_re.match(lang) and\n os.path.isdir(os.path.join(locale_path, lang))):\n langs.append((trans_real.to_language(lang),\n data.languages.get(lang, (lang,))[0]))\n return langs\n\n\ndef supported_langs():\n \"\"\"Returns a list of supported locales.\"\"\"\n from django.conf import settings\n return settings.LANGUAGES\n\n\ndef get_language_supported(lang_code, supported):\n normalized = data.normalize_code(data.simplify_to_common(lang_code))\n if normalized in supported:\n return normalized\n\n # FIXME: horribly slow way of dealing with languages with @ in them\n for lang in supported.keys():\n if normalized == data.normalize_code(lang):\n return lang\n\n return None\n\n\ndef get_lang_from_session(request, supported):\n if not hasattr(request, 'session'):\n return None\n lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n if not lang_code:\n return None\n return get_language_supported(lang_code, supported)\n\n\ndef get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n if not lang_code:\n return None\n return get_language_supported(lang_code, supported)\n\n\ndef get_lang_from_http_header(request, supported):\n \"\"\"If the user's browser sends a list of preferred languages in the\n HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through\n the list, and for each entry, we check whether we have a matching\n pootle translation project. 
If so, we return it.\n\n If nothing is found, return None.\n \"\"\"\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n supported_lang = get_language_supported(accept_lang, supported)\n if supported_lang:\n return supported_lang\n return None\n\n\ndef get_language_from_request(request, check_path=False):\n \"\"\"Try to get the user's preferred language by first checking the\n cookie and then by checking the HTTP language headers.\n\n If all fails, try fall back to default language.\n \"\"\"\n supported = dict(supported_langs())\n for lang_getter in (get_lang_from_session,\n get_lang_from_cookie,\n get_lang_from_http_header):\n lang = lang_getter(request, supported)\n if lang is not None:\n return lang\n from django.conf import settings\n if settings.LANGUAGE_CODE in supported:\n return settings.LANGUAGE_CODE\n return 'en-us'\n\n\ndef get_language_bidi():\n \"\"\"Override for Django's get_language_bidi that's aware of more RTL\n languages.\n \"\"\"\n return gettext.language_dir(translation.get_language()) == 'rtl'\n\n\ndef hijack_translation():\n \"\"\"Sabotage Django's fascist linguistical regime.\"\"\"\n # Override functions that check if language is known to Django\n translation.check_for_language = lambda lang_code: True\n trans_real.check_for_language = lambda lang_code: True\n translation.get_language_from_request = get_language_from_request\n\n # Override django's inadequate bidi detection\n translation.get_language_bidi = get_language_bidi\n", "path": "pootle/i18n/override.py"}]} | 1,461 | 607 |
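Editorial note (not part of the dataset record above): the `get_language_supported()` helper introduced in the golden diff is what enables the simpler-code fallback, because `translate.lang.data` normalizes `it-IT` down to `it` before the supported-locale check. A minimal sketch, assuming the translate-toolkit is installed:

```python
# Stand-alone illustration of the normalization step used by the patch.
from translate.lang import data

supported = {"it": "Italian", "pt-br": "Portuguese (Brazil)"}

def get_language_supported(lang_code, supported):
    normalized = data.normalize_code(data.simplify_to_common(lang_code))
    if normalized in supported:
        return normalized
    # Slow fallback for languages with @ in them, mirroring the patch.
    for lang in supported:
        if normalized == data.normalize_code(lang):
            return lang
    return None

print(get_language_supported("it-IT", supported))  # should resolve to "it"
```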
gh_patches_debug_8007 | rasdani/github-patches | git_diff | medtagger__MedTagger-401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error indicator when logging in or registering went wrong
## Current Behaviour
 - currently, only an error icon is displayed when something goes wrong while logging in or registering a new account
## Expected Behaviour
 - an error message should be displayed next to the error icon, so that the user knows what went wrong
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/medtagger/api/auth/business.py`
Content:
```
1 """Module responsible for business logic in all Auth endpoint."""
2 from medtagger.api import InvalidArgumentsException
3 from medtagger.api.security import hash_password, verify_user_password, generate_auth_token
4 from medtagger.database.models import User
5 from medtagger.repositories import roles as RolesRepository, users as UsersRepository
6
7
8 def create_user(email: str, password: str, first_name: str, last_name: str) -> int:
9 """Create user with the given user information. Password is being hashed.
10
11 :param email: user email in string format
12 :param password: user password in string format
13 :param first_name: user first name in string format
14 :param last_name: user last name in string format
15
16 :return: id of the new user
17 """
18 user = UsersRepository.get_user_by_email(email)
19 if user:
20 raise InvalidArgumentsException('User with this email already exist')
21 password_hash = hash_password(password)
22 new_user = User(email, password_hash, first_name, last_name)
23 role = RolesRepository.get_role_with_name('volunteer')
24 if not role:
25 raise InvalidArgumentsException('Role does not exist.')
26 new_user.roles.append(role)
27 return UsersRepository.add_new_user(new_user)
28
29
30 def sign_in_user(email: str, password: str) -> str:
31 """Sign in user using given username and password.
32
33 :param email: user email in string format
34 :param password: user password in string format
35
36 :return: authentication token
37 """
38 user = UsersRepository.get_user_by_email(email)
39 if not user:
40 raise InvalidArgumentsException('User does not exist.')
41 if not verify_user_password(user, password):
42 raise InvalidArgumentsException('Password does not match.')
43 return generate_auth_token(user)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py
--- a/backend/medtagger/api/auth/business.py
+++ b/backend/medtagger/api/auth/business.py
@@ -17,7 +17,7 @@
"""
user = UsersRepository.get_user_by_email(email)
if user:
- raise InvalidArgumentsException('User with this email already exist')
+ raise InvalidArgumentsException('User with this email already exists')
password_hash = hash_password(password)
new_user = User(email, password_hash, first_name, last_name)
role = RolesRepository.get_role_with_name('volunteer')
| {"golden_diff": "diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py\n--- a/backend/medtagger/api/auth/business.py\n+++ b/backend/medtagger/api/auth/business.py\n@@ -17,7 +17,7 @@\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n- raise InvalidArgumentsException('User with this email already exist')\n+ raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n", "issue": "Error indicator when logging in or registering went wrong\n## Current Behaviour\r\n - currently, only error icon is displayed when something went wrong during logging in or registering new account\r\n\r\n## Expected Behaviour \r\n - an error message should be displayed next to the error icon, so that user knows what went wrong\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exist')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}], "after_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. 
Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}]} | 783 | 142 |
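Editorial note (not part of the dataset record above): the golden diff only changes the message text, but the point of the issue is that this message is what the UI should surface. A hypothetical pytest sketch (the monkeypatching strategy is an assumption, not MedTagger's actual test suite) of how one might pin the wording down:

```python
# Hypothetical test sketch: with the user lookup patched to report an existing
# account, create_user() should raise a message the frontend can display verbatim.
import pytest


def test_duplicate_email_message(monkeypatch):
    from medtagger.api import InvalidArgumentsException
    from medtagger.api.auth import business
    from medtagger.repositories import users as UsersRepository

    monkeypatch.setattr(UsersRepository, "get_user_by_email", lambda email: object())
    with pytest.raises(InvalidArgumentsException) as excinfo:
        business.create_user("[email protected]", "secret", "Jane", "Doe")
    assert "already exists" in str(excinfo.value)
```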
gh_patches_debug_53690 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-2180 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticdl client crashes with invalid args
```
$ elasticdl -v
Traceback (most recent call last):
File "/usr/local/bin/elasticdl", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py", line 97, in main
args, _ = parser.parse_known_args()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 1787, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 2022, in _parse_known_args
', '.join(required_actions))
TypeError: sequence item 0: expected str instance, NoneType found
```
--- END ISSUE ---
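(Editorial context, not part of the issue text: the `TypeError` above is argparse's stock failure mode when subparsers are marked required but have no `dest`; with no subcommand given, argparse tries to join `None` into its error message. A stand-alone reproduction sketch, not ElasticDL's actual fix, showing that giving the subparsers a `dest` turns the crash into a normal "required argument" error:)

```python
# Stand-alone argparse sketch; run it to see the friendly error instead of a TypeError.
import argparse

parser = argparse.ArgumentParser(prog="elasticdl")
subparsers = parser.add_subparsers(dest="subcommand")  # the dest is the key difference
subparsers.required = True
subparsers.add_parser("train")

try:
    parser.parse_known_args(["-v"])  # mirrors `elasticdl -v` from the traceback
except SystemExit:
    print("argparse reported the missing subcommand instead of raising TypeError")
```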
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl_client/main.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import argparse
15 import sys
16
17 from elasticdl_client.api import (
18 build_zoo,
19 evaluate,
20 init_zoo,
21 predict,
22 push_zoo,
23 train,
24 )
25 from elasticdl_client.common import args
26
27
28 def build_argument_parser():
29 parser = argparse.ArgumentParser()
30 subparsers = parser.add_subparsers()
31 subparsers.required = True
32
33 # Initialize the parser for the `elasticdl zoo` commands
34 zoo_parser = subparsers.add_parser(
35 "zoo",
36 help="Initialize | Build | Push a docker image for the model zoo.",
37 )
38 zoo_subparsers = zoo_parser.add_subparsers()
39 zoo_subparsers.required = True
40
41 # elasticdl zoo init
42 zoo_init_parser = zoo_subparsers.add_parser(
43 "init", help="Initialize the model zoo."
44 )
45 zoo_init_parser.set_defaults(func=init_zoo)
46 args.add_zoo_init_params(zoo_init_parser)
47
48 # elasticdl zoo build
49 zoo_build_parser = zoo_subparsers.add_parser(
50 "build", help="Build a docker image for the model zoo."
51 )
52 zoo_build_parser.set_defaults(func=build_zoo)
53 args.add_zoo_build_params(zoo_build_parser)
54
55 # elasticdl zoo push
56 zoo_push_parser = zoo_subparsers.add_parser(
57 "push",
58 help="Push the docker image to a remote registry for the distributed"
59 "ElasticDL job.",
60 )
61 zoo_push_parser.set_defaults(func=push_zoo)
62 args.add_zoo_push_params(zoo_push_parser)
63
64 # elasticdl train
65 train_parser = subparsers.add_parser(
66 "train", help="Submit a ElasticDL distributed training job"
67 )
68 train_parser.set_defaults(func=train)
69 args.add_common_params(train_parser)
70 args.add_train_params(train_parser)
71
72 # elasticdl evaluate
73 evaluate_parser = subparsers.add_parser(
74 "evaluate", help="Submit a ElasticDL distributed evaluation job"
75 )
76 evaluate_parser.set_defaults(func=evaluate)
77 args.add_common_params(evaluate_parser)
78 args.add_evaluate_params(evaluate_parser)
79
80 # elasticdl predict
81 predict_parser = subparsers.add_parser(
82 "predict", help="Submit a ElasticDL distributed prediction job"
83 )
84 predict_parser.set_defaults(func=predict)
85 args.add_common_params(predict_parser)
86 args.add_predict_params(predict_parser)
87
88 return parser
89
90
91 def main():
92 parser = build_argument_parser()
93 if len(sys.argv) == 1:
94 parser.print_help(sys.stderr)
95 sys.exit(1)
96
97 args, _ = parser.parse_known_args()
98 args.func(args)
99
100
101 if __name__ == "__main__":
102 main()
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py
--- a/elasticdl_client/main.py
+++ b/elasticdl_client/main.py
@@ -94,7 +94,12 @@
parser.print_help(sys.stderr)
sys.exit(1)
- args, _ = parser.parse_known_args()
+ try:
+ args, _ = parser.parse_known_args()
+ except TypeError:
+ parser.print_help(sys.stderr)
+ sys.exit(1)
+
args.func(args)
| {"golden_diff": "diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py\n--- a/elasticdl_client/main.py\n+++ b/elasticdl_client/main.py\n@@ -94,7 +94,12 @@\n parser.print_help(sys.stderr)\n sys.exit(1)\n \n- args, _ = parser.parse_known_args()\n+ try:\n+ args, _ = parser.parse_known_args()\n+ except TypeError:\n+ parser.print_help(sys.stderr)\n+ sys.exit(1)\n+\n args.func(args)\n", "issue": "Elasticdl client crashes with invalid args\n```\r\n$ elasticdl -v\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/elasticdl\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py\", line 97, in main\r\n args, _ = parser.parse_known_args()\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 1787, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 2022, in _parse_known_args\r\n ', '.join(required_actions))\r\nTypeError: sequence item 0: expected str instance, NoneType found\r\n```\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nfrom elasticdl_client.api import (\n build_zoo,\n evaluate,\n init_zoo,\n predict,\n push_zoo,\n train,\n)\nfrom elasticdl_client.common import args\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n subparsers.required = True\n\n # Initialize the parser for the `elasticdl zoo` commands\n zoo_parser = subparsers.add_parser(\n \"zoo\",\n help=\"Initialize | Build | Push a docker image for the model zoo.\",\n )\n zoo_subparsers = zoo_parser.add_subparsers()\n zoo_subparsers.required = True\n\n # elasticdl zoo init\n zoo_init_parser = zoo_subparsers.add_parser(\n \"init\", help=\"Initialize the model zoo.\"\n )\n zoo_init_parser.set_defaults(func=init_zoo)\n args.add_zoo_init_params(zoo_init_parser)\n\n # elasticdl zoo build\n zoo_build_parser = zoo_subparsers.add_parser(\n \"build\", help=\"Build a docker image for the model zoo.\"\n )\n zoo_build_parser.set_defaults(func=build_zoo)\n args.add_zoo_build_params(zoo_build_parser)\n\n # elasticdl zoo push\n zoo_push_parser = zoo_subparsers.add_parser(\n \"push\",\n help=\"Push the docker image to a remote registry for the distributed\"\n \"ElasticDL job.\",\n )\n zoo_push_parser.set_defaults(func=push_zoo)\n args.add_zoo_push_params(zoo_push_parser)\n\n # elasticdl train\n train_parser = subparsers.add_parser(\n \"train\", help=\"Submit a ElasticDL distributed training job\"\n )\n train_parser.set_defaults(func=train)\n args.add_common_params(train_parser)\n args.add_train_params(train_parser)\n\n # elasticdl evaluate\n evaluate_parser = subparsers.add_parser(\n \"evaluate\", help=\"Submit a ElasticDL distributed evaluation job\"\n )\n 
evaluate_parser.set_defaults(func=evaluate)\n args.add_common_params(evaluate_parser)\n args.add_evaluate_params(evaluate_parser)\n\n # elasticdl predict\n predict_parser = subparsers.add_parser(\n \"predict\", help=\"Submit a ElasticDL distributed prediction job\"\n )\n predict_parser.set_defaults(func=predict)\n args.add_common_params(predict_parser)\n args.add_predict_params(predict_parser)\n\n return parser\n\n\ndef main():\n parser = build_argument_parser()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args, _ = parser.parse_known_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl_client/main.py"}], "after_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nfrom elasticdl_client.api import (\n build_zoo,\n evaluate,\n init_zoo,\n predict,\n push_zoo,\n train,\n)\nfrom elasticdl_client.common import args\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n subparsers.required = True\n\n # Initialize the parser for the `elasticdl zoo` commands\n zoo_parser = subparsers.add_parser(\n \"zoo\",\n help=\"Initialize | Build | Push a docker image for the model zoo.\",\n )\n zoo_subparsers = zoo_parser.add_subparsers()\n zoo_subparsers.required = True\n\n # elasticdl zoo init\n zoo_init_parser = zoo_subparsers.add_parser(\n \"init\", help=\"Initialize the model zoo.\"\n )\n zoo_init_parser.set_defaults(func=init_zoo)\n args.add_zoo_init_params(zoo_init_parser)\n\n # elasticdl zoo build\n zoo_build_parser = zoo_subparsers.add_parser(\n \"build\", help=\"Build a docker image for the model zoo.\"\n )\n zoo_build_parser.set_defaults(func=build_zoo)\n args.add_zoo_build_params(zoo_build_parser)\n\n # elasticdl zoo push\n zoo_push_parser = zoo_subparsers.add_parser(\n \"push\",\n help=\"Push the docker image to a remote registry for the distributed\"\n \"ElasticDL job.\",\n )\n zoo_push_parser.set_defaults(func=push_zoo)\n args.add_zoo_push_params(zoo_push_parser)\n\n # elasticdl train\n train_parser = subparsers.add_parser(\n \"train\", help=\"Submit a ElasticDL distributed training job\"\n )\n train_parser.set_defaults(func=train)\n args.add_common_params(train_parser)\n args.add_train_params(train_parser)\n\n # elasticdl evaluate\n evaluate_parser = subparsers.add_parser(\n \"evaluate\", help=\"Submit a ElasticDL distributed evaluation job\"\n )\n evaluate_parser.set_defaults(func=evaluate)\n args.add_common_params(evaluate_parser)\n args.add_evaluate_params(evaluate_parser)\n\n # elasticdl predict\n predict_parser = subparsers.add_parser(\n \"predict\", help=\"Submit a ElasticDL distributed prediction job\"\n )\n predict_parser.set_defaults(func=predict)\n args.add_common_params(predict_parser)\n args.add_predict_params(predict_parser)\n\n return parser\n\n\ndef main():\n parser = build_argument_parser()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n 
sys.exit(1)\n\n try:\n args, _ = parser.parse_known_args()\n except TypeError:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl_client/main.py"}]} | 1,386 | 115 |
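A short aside on the `TypeError: sequence item 0: expected str instance, NoneType found` traceback in the elasticdl record above: under Python 3.7 (the version in the traceback), marking a subparsers group `required = True` without giving it a `dest` leaves argparse with no name for the missing argument, and joining the required-argument names fails with exactly that TypeError. The sketch below is a hypothetical, standalone reproduction, not part of the original record; the `dest="command"` variant shown is one common alternative to the try/except used in the golden diff.

```python
import argparse

# Reproduction of the failure mode (hypothetical standalone script, Python 3.7).
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()  # no dest= supplied, so the group is unnamed
subparsers.required = True

try:
    parser.parse_known_args([])  # no subcommand given
except TypeError as err:
    # argparse joins the names of missing required arguments, but this
    # group's name resolves to None -> the TypeError seen in the issue.
    print("argparse raised:", err)

# One common workaround: give the subparsers group a dest so argparse can
# report "the following arguments are required: command" instead of crashing.
fixed = argparse.ArgumentParser()
sub = fixed.add_subparsers(dest="command")
sub.required = True
```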
gh_patches_debug_7059 | rasdani/github-patches | git_diff | modin-project__modin-6283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor ci.yml to reduce the amount of copy-pasting
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r", encoding="utf-8") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
8 ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
9 unidist_deps = ["unidist[mpi]>=0.2.1"]
10 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
11 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
12 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
13 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps
14
15 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
16 # This file provides the "import pandas before Ray init" feature if specific
17 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
18 cmdclass = versioneer.get_cmdclass()
19 extra_files = ["modin-autoimport-pandas.pth"]
20
21
22 class AddPthFileBuild(cmdclass["build_py"]):
23 def _get_data_files(self):
24 return (super()._get_data_files() or []) + [
25 (".", ".", self.build_lib, extra_files)
26 ]
27
28
29 class AddPthFileSDist(cmdclass["sdist"]):
30 def make_distribution(self):
31 self.filelist.extend(extra_files)
32 return super().make_distribution()
33
34
35 cmdclass["build_py"] = AddPthFileBuild
36 cmdclass["sdist"] = AddPthFileSDist
37
38 setup(
39 name="modin",
40 version=versioneer.get_version(),
41 cmdclass=cmdclass,
42 description="Modin: Make your pandas code run faster by changing one line of code.",
43 packages=find_packages(exclude=["scripts", "scripts.*"]),
44 include_package_data=True,
45 license="Apache 2",
46 url="https://github.com/modin-project/modin",
47 long_description=long_description,
48 long_description_content_type="text/markdown",
49 install_requires=[
50 "pandas>=2,<2.1",
51 "packaging",
52 "numpy>=1.18.5",
53 "fsspec",
54 "psutil",
55 ],
56 extras_require={
57 # can be installed by pip install modin[dask]
58 "dask": dask_deps,
59 "ray": ray_deps,
60 "unidist": unidist_deps,
61 "remote": remote_deps,
62 "spreadsheet": spreadsheet_deps,
63 "sql": sql_deps,
64 "all": all_deps,
65 },
66 python_requires=">=3.8",
67 )
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,8 @@
long_description = fh.read()
dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
-ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
+ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
unidist_deps = ["unidist[mpi]>=0.2.1"]
remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,8 @@\n long_description = fh.read()\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n-ray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\n+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Refactor ci.yml to reduce the amount of copy-pasting\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\nray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along 
with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]} | 1,005 | 195 |
gh_patches_debug_1143 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3132 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When logged in landing page should be "myRSR"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/views/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from django.core.urlresolvers import reverse
10 from django.http import HttpResponseRedirect
11
12
13 def index(request):
14 """."""
15 return HttpResponseRedirect(reverse('project-directory', args=[]))
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py
--- a/akvo/rsr/views/__init__.py
+++ b/akvo/rsr/views/__init__.py
@@ -11,5 +11,7 @@
def index(request):
- """."""
- return HttpResponseRedirect(reverse('project-directory', args=[]))
+ """Redirect user to project directory or My RSR."""
+
+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'
+ return HttpResponseRedirect(reverse(redirect_url, args=[]))
| {"golden_diff": "diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py\n--- a/akvo/rsr/views/__init__.py\n+++ b/akvo/rsr/views/__init__.py\n@@ -11,5 +11,7 @@\n \n \n def index(request):\n- \"\"\".\"\"\"\n- return HttpResponseRedirect(reverse('project-directory', args=[]))\n+ \"\"\"Redirect user to project directory or My RSR.\"\"\"\n+\n+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n+ return HttpResponseRedirect(reverse(redirect_url, args=[]))\n", "issue": "When logged in landing page should be \"myRSR\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\".\"\"\"\n return HttpResponseRedirect(reverse('project-directory', args=[]))\n", "path": "akvo/rsr/views/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\"Redirect user to project directory or My RSR.\"\"\"\n\n redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n return HttpResponseRedirect(reverse(redirect_url, args=[]))\n", "path": "akvo/rsr/views/__init__.py"}]} | 402 | 133 |
gh_patches_debug_7097 | rasdani/github-patches | git_diff | learningequality__kolibri-3406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Showing 120% score in exam report
### Observed behavior
After submitting an exam, when a coach user views the progress of each learner in the exam report, the coach sees a 120% score. We have attached a screenshot and database file, so you can easily reproduce this issue.
### Expected behavior
Score must be between 0-100%.
### Steps to reproduce
1. Copy attached database file in .kolibri folder.
2. login with username "pm" and password "sc".
3. Click on Coach.
4. Click on Class 4A.
5. Click on Exams.
6. See report of the Unit 2B-Final exam.
7. See learner Junaid Shaikh.
### Context
* Kolibri version : Kolibri 0.4.9
* Operating system : Ubuntu 14.04
* Browser : Chrome
### Screenshots

### Database
[db.sqlite3.zip](https://github.com/learningequality/kolibri/files/1617728/db.sqlite3.zip)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/logger/serializers.py`
Content:
```
1 from django.db.models import Sum
2 from django.utils.timezone import now
3 from kolibri.auth.models import FacilityUser
4 from kolibri.core.serializers import KolibriModelSerializer
5 from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog
6 from rest_framework import serializers
7
8
9 class ContentSessionLogSerializer(KolibriModelSerializer):
10
11 extra_fields = serializers.JSONField(default='{}')
12
13 class Meta:
14 model = ContentSessionLog
15 fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',
16 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
17
18 class ExamLogSerializer(KolibriModelSerializer):
19 progress = serializers.SerializerMethodField()
20 score = serializers.SerializerMethodField()
21
22 def get_progress(self, obj):
23 return obj.attemptlogs.count()
24
25 def get_score(self, obj):
26 return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')
27
28 class Meta:
29 model = ExamLog
30 fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')
31 read_only_fields = ('completion_timestamp', )
32
33 def update(self, instance, validated_data):
34 # This has changed, set the completion timestamp
35 if validated_data.get('closed') and not instance.closed:
36 instance.completion_timestamp = now()
37 return super(ExamLogSerializer, self).update(instance, validated_data)
38
39 class MasteryLogSerializer(KolibriModelSerializer):
40
41 pastattempts = serializers.SerializerMethodField()
42 totalattempts = serializers.SerializerMethodField()
43 mastery_criterion = serializers.JSONField(default='{}')
44
45 class Meta:
46 model = MasteryLog
47 fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',
48 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')
49
50 def get_pastattempts(self, obj):
51 # will return a list of the latest 10 correct and hint_taken fields for each attempt.
52 return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]
53
54 def get_totalattempts(self, obj):
55 return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
56
57 class AttemptLogSerializer(KolibriModelSerializer):
58 answer = serializers.JSONField(default='{}')
59 interaction_history = serializers.JSONField(default='[]')
60
61 class Meta:
62 model = AttemptLog
63 fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
64 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
65 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
66
67 class ExamAttemptLogSerializer(KolibriModelSerializer):
68 answer = serializers.JSONField(default='{}', allow_null=True)
69 interaction_history = serializers.JSONField(default='[]')
70
71 class Meta:
72 model = ExamAttemptLog
73 fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',
74 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
75 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
76
77 def validate(self, data):
78 # Only do this validation when both are being set
79 # not necessary on PATCH, for example
80 if data.get('examlog') and data.get('user'):
81 try:
82 if data['examlog'].user != data['user']:
83 raise serializers.ValidationError('User field and user for related exam log are not the same')
84 except ExamLog.DoesNotExist:
85 raise serializers.ValidationError('Invalid exam log')
86 return data
87
88 class ContentSummaryLogSerializer(KolibriModelSerializer):
89
90 currentmasterylog = serializers.SerializerMethodField()
91 extra_fields = serializers.JSONField(default='{}')
92
93 class Meta:
94 model = ContentSummaryLog
95 fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
96 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')
97
98 def get_currentmasterylog(self, obj):
99 try:
100 current_log = obj.masterylogs.latest('end_timestamp')
101 return MasteryLogSerializer(current_log).data
102 except MasteryLog.DoesNotExist:
103 return None
104
105 class UserSessionLogSerializer(KolibriModelSerializer):
106
107 class Meta:
108 model = UserSessionLog
109 fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
110
111 class TotalContentProgressSerializer(serializers.ModelSerializer):
112
113 progress = serializers.SerializerMethodField()
114
115 class Meta:
116 model = FacilityUser
117 fields = ('progress', 'id')
118
119 def get_progress(self, obj):
120 return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/logger/serializers.py b/kolibri/logger/serializers.py
--- a/kolibri/logger/serializers.py
+++ b/kolibri/logger/serializers.py
@@ -20,10 +20,10 @@
score = serializers.SerializerMethodField()
def get_progress(self, obj):
- return obj.attemptlogs.count()
+ return obj.exam.question_count
def get_score(self, obj):
- return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')
+ return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')
class Meta:
model = ExamLog
| {"golden_diff": "diff --git a/kolibri/logger/serializers.py b/kolibri/logger/serializers.py\n--- a/kolibri/logger/serializers.py\n+++ b/kolibri/logger/serializers.py\n@@ -20,10 +20,10 @@\n score = serializers.SerializerMethodField()\n \n def get_progress(self, obj):\n- return obj.attemptlogs.count()\n+ return obj.exam.question_count\n \n def get_score(self, obj):\n- return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')\n+ return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')\n \n class Meta:\n model = ExamLog\n", "issue": "Showing 120% score in exam report\n### Observed behavior\r\nAfter submitting exam, when coach user watching progress of each user in exam report. Coach user see 120% score in exam report. We have attached screenshot and database file,so you can easily re-generate this issue.\r\n\r\n### Expected behavior\r\nScore must be between 0-100%.\r\n\r\n### Steps to reproduce\r\n1. Copy attached database file in .kolibri folder.\r\n2. login with username \"pm\" and password \"sc\".\r\n3. Click on Coach.\r\n4. Click on Class 4A.\r\n5. Click on Exams.\r\n6. See report of the Unit 2B-Final exam.\r\n7. See learner Junaid Shaikh.\r\n\r\n### Context\r\n * Kolibri version : Kolibri 0.4.9\r\n * Operating system : Ubuntu 14.04\r\n * Browser : Chrome \r\n\r\n### Screenshots\r\n\r\n\r\n### Database\r\n[db.sqlite3.zip](https://github.com/learningequality/kolibri/files/1617728/db.sqlite3.zip)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Sum\nfrom django.utils.timezone import now\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.core.serializers import KolibriModelSerializer\nfrom kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom rest_framework import serializers\n\n\nclass ContentSessionLogSerializer(KolibriModelSerializer):\n\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSessionLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')\n\nclass ExamLogSerializer(KolibriModelSerializer):\n progress = serializers.SerializerMethodField()\n score = serializers.SerializerMethodField()\n\n def get_progress(self, obj):\n return obj.attemptlogs.count()\n\n def get_score(self, obj):\n return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')\n\n class Meta:\n model = ExamLog\n fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')\n read_only_fields = ('completion_timestamp', )\n\n def update(self, instance, validated_data):\n # This has changed, set the completion timestamp\n if validated_data.get('closed') and not instance.closed:\n instance.completion_timestamp = now()\n return super(ExamLogSerializer, self).update(instance, validated_data)\n\nclass MasteryLogSerializer(KolibriModelSerializer):\n\n pastattempts = serializers.SerializerMethodField()\n totalattempts = serializers.SerializerMethodField()\n mastery_criterion = serializers.JSONField(default='{}')\n\n class Meta:\n model = MasteryLog\n fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',\n 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')\n\n def get_pastattempts(self, obj):\n # will return a list of the latest 10 correct and hint_taken fields for each attempt.\n return 
AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]\n\n def get_totalattempts(self, obj):\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()\n\nclass AttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}')\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = AttemptLog\n fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\nclass ExamAttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}', allow_null=True)\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = ExamAttemptLog\n fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\n def validate(self, data):\n # Only do this validation when both are being set\n # not necessary on PATCH, for example\n if data.get('examlog') and data.get('user'):\n try:\n if data['examlog'].user != data['user']:\n raise serializers.ValidationError('User field and user for related exam log are not the same')\n except ExamLog.DoesNotExist:\n raise serializers.ValidationError('Invalid exam log')\n return data\n\nclass ContentSummaryLogSerializer(KolibriModelSerializer):\n\n currentmasterylog = serializers.SerializerMethodField()\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSummaryLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')\n\n def get_currentmasterylog(self, obj):\n try:\n current_log = obj.masterylogs.latest('end_timestamp')\n return MasteryLogSerializer(current_log).data\n except MasteryLog.DoesNotExist:\n return None\n\nclass UserSessionLogSerializer(KolibriModelSerializer):\n\n class Meta:\n model = UserSessionLog\n fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')\n\nclass TotalContentProgressSerializer(serializers.ModelSerializer):\n\n progress = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = ('progress', 'id')\n\n def get_progress(self, obj):\n return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')\n", "path": "kolibri/logger/serializers.py"}], "after_files": [{"content": "from django.db.models import Sum\nfrom django.utils.timezone import now\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.core.serializers import KolibriModelSerializer\nfrom kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom rest_framework import serializers\n\n\nclass ContentSessionLogSerializer(KolibriModelSerializer):\n\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSessionLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')\n\nclass ExamLogSerializer(KolibriModelSerializer):\n progress = serializers.SerializerMethodField()\n score = 
serializers.SerializerMethodField()\n\n def get_progress(self, obj):\n return obj.exam.question_count\n\n def get_score(self, obj):\n return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')\n\n class Meta:\n model = ExamLog\n fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')\n read_only_fields = ('completion_timestamp', )\n\n def update(self, instance, validated_data):\n # This has changed, set the completion timestamp\n if validated_data.get('closed') and not instance.closed:\n instance.completion_timestamp = now()\n return super(ExamLogSerializer, self).update(instance, validated_data)\n\nclass MasteryLogSerializer(KolibriModelSerializer):\n\n pastattempts = serializers.SerializerMethodField()\n totalattempts = serializers.SerializerMethodField()\n mastery_criterion = serializers.JSONField(default='{}')\n\n class Meta:\n model = MasteryLog\n fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',\n 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')\n\n def get_pastattempts(self, obj):\n # will return a list of the latest 10 correct and hint_taken fields for each attempt.\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]\n\n def get_totalattempts(self, obj):\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()\n\nclass AttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}')\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = AttemptLog\n fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\nclass ExamAttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}', allow_null=True)\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = ExamAttemptLog\n fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\n def validate(self, data):\n # Only do this validation when both are being set\n # not necessary on PATCH, for example\n if data.get('examlog') and data.get('user'):\n try:\n if data['examlog'].user != data['user']:\n raise serializers.ValidationError('User field and user for related exam log are not the same')\n except ExamLog.DoesNotExist:\n raise serializers.ValidationError('Invalid exam log')\n return data\n\nclass ContentSummaryLogSerializer(KolibriModelSerializer):\n\n currentmasterylog = serializers.SerializerMethodField()\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSummaryLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')\n\n def get_currentmasterylog(self, obj):\n try:\n current_log = obj.masterylogs.latest('end_timestamp')\n return MasteryLogSerializer(current_log).data\n except MasteryLog.DoesNotExist:\n return None\n\nclass UserSessionLogSerializer(KolibriModelSerializer):\n\n class Meta:\n model = UserSessionLog\n fields = 
('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')\n\nclass TotalContentProgressSerializer(serializers.ModelSerializer):\n\n progress = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = ('progress', 'id')\n\n def get_progress(self, obj):\n return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')\n", "path": "kolibri/logger/serializers.py"}]} | 1,909 | 162 |
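On the 120% score in the kolibri record above: the golden diff switches the score aggregation to count each exam item only once (`values_list('item') ... distinct()`) and reports progress as `exam.question_count`. The snippet below is a self-contained sketch with made-up attempt data (not values from the attached database) of one plausible way double-counted attempts push a correct-answer sum past the question count.

```python
# Hypothetical attempt-log rows for one learner on a 5-question exam:
# (item_id, correct). Question "q1" was answered twice, both times correctly.
attempts = [("q1", 1), ("q1", 1), ("q2", 1), ("q3", 1), ("q4", 1), ("q5", 1)]

question_count = 5

# Summing over every attempt row counts retried questions more than once.
naive_correct = sum(correct for _, correct in attempts)
print(naive_correct / question_count)  # 1.2 -> rendered as 120%

# Keeping a single row per item (the effect of the distinct()-based fix)
# caps the achievable score at the number of questions.
latest_per_item = {}
for item, correct in attempts:
    latest_per_item[item] = correct
deduped_correct = sum(latest_per_item.values())
print(deduped_correct / question_count)  # 1.0 -> 100%
```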
gh_patches_debug_4446 | rasdani/github-patches | git_diff | zenml-io__zenml-317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Repeated Paragraph in the documentation for `core-concepts`
In the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the pipeline section is repeated in the 3rd paragraph of the same section.
```markdown
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the
order also pre-decided. Other sorts of pipelines can be created as well from scratch.
Pipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case
you have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the
orchestrator environment.
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add
pre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/zenml/materializers/built_in_materializer.py`
Content:
```
1 # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
12 # or implied. See the License for the specific language governing
13 # permissions and limitations under the License.
14 import os
15 from typing import Any, Type
16
17 from zenml.artifacts import DataAnalysisArtifact, DataArtifact
18 from zenml.logger import get_logger
19 from zenml.materializers.base_materializer import BaseMaterializer
20 from zenml.utils import yaml_utils
21
22 logger = get_logger(__name__)
23 DEFAULT_FILENAME = "data.json"
24
25
26 class BuiltInMaterializer(BaseMaterializer):
27 """Read/Write JSON files."""
28
29 # TODO [LOW]: consider adding typing.Dict and typing.List
30 # since these are the 'correct' way to annotate these types.
31
32 ASSOCIATED_ARTIFACT_TYPES = [
33 DataArtifact,
34 DataAnalysisArtifact,
35 ]
36 ASSOCIATED_TYPES = [
37 int,
38 str,
39 bytes,
40 dict,
41 float,
42 list,
43 tuple,
44 bool,
45 ]
46
47 def handle_input(self, data_type: Type[Any]) -> Any:
48 """Reads basic primitive types from json."""
49 super().handle_input(data_type)
50 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
51 contents = yaml_utils.read_json(filepath)
52 if type(contents) != data_type:
53 # TODO [ENG-142]: Raise error or try to coerce
54 logger.debug(
55 f"Contents {contents} was type {type(contents)} but expected "
56 f"{data_type}"
57 )
58 return contents
59
60 def handle_return(self, data: Any) -> None:
61 """Handles basic built-in types and stores them as json"""
62 super().handle_return(data)
63 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
64 yaml_utils.write_json(filepath, data)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py
--- a/src/zenml/materializers/built_in_materializer.py
+++ b/src/zenml/materializers/built_in_materializer.py
@@ -26,7 +26,7 @@
class BuiltInMaterializer(BaseMaterializer):
"""Read/Write JSON files."""
- # TODO [LOW]: consider adding typing.Dict and typing.List
+ # TODO [ENG-322]: consider adding typing.Dict and typing.List
# since these are the 'correct' way to annotate these types.
ASSOCIATED_ARTIFACT_TYPES = [
| {"golden_diff": "diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py\n--- a/src/zenml/materializers/built_in_materializer.py\n+++ b/src/zenml/materializers/built_in_materializer.py\n@@ -26,7 +26,7 @@\n class BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n \n- # TODO [LOW]: consider adding typing.Dict and typing.List\n+ # TODO [ENG-322]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n \n ASSOCIATED_ARTIFACT_TYPES = [\n", "issue": "Repeated Paragraph in the documentation for `core-concepts`\nIn the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the the pipeline section is repeated in the 3rd paragraph of the same section. \r\n\r\n```markdown\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the \r\norder also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n\r\nPipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case \r\nyou have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the \r\norchestrator environment.\r\n\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add \r\npre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n```\n", "before_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nfrom typing import Any, Type\n\nfrom zenml.artifacts import DataAnalysisArtifact, DataArtifact\nfrom zenml.logger import get_logger\nfrom zenml.materializers.base_materializer import BaseMaterializer\nfrom zenml.utils import yaml_utils\n\nlogger = get_logger(__name__)\nDEFAULT_FILENAME = \"data.json\"\n\n\nclass BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n\n # TODO [LOW]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n\n ASSOCIATED_ARTIFACT_TYPES = [\n DataArtifact,\n DataAnalysisArtifact,\n ]\n ASSOCIATED_TYPES = [\n int,\n str,\n bytes,\n dict,\n float,\n list,\n tuple,\n bool,\n ]\n\n def handle_input(self, data_type: Type[Any]) -> Any:\n \"\"\"Reads basic primitive types from json.\"\"\"\n super().handle_input(data_type)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n contents = yaml_utils.read_json(filepath)\n if type(contents) != data_type:\n # TODO [ENG-142]: Raise error or try to coerce\n logger.debug(\n f\"Contents {contents} was type {type(contents)} but expected \"\n f\"{data_type}\"\n )\n return contents\n\n def handle_return(self, data: Any) -> None:\n \"\"\"Handles basic built-in types and stores them as json\"\"\"\n super().handle_return(data)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n yaml_utils.write_json(filepath, data)\n", "path": "src/zenml/materializers/built_in_materializer.py"}], "after_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nfrom typing import Any, Type\n\nfrom zenml.artifacts import DataAnalysisArtifact, DataArtifact\nfrom zenml.logger import get_logger\nfrom zenml.materializers.base_materializer import BaseMaterializer\nfrom zenml.utils import yaml_utils\n\nlogger = get_logger(__name__)\nDEFAULT_FILENAME = \"data.json\"\n\n\nclass BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n\n # TODO [ENG-322]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n\n ASSOCIATED_ARTIFACT_TYPES = [\n DataArtifact,\n DataAnalysisArtifact,\n ]\n ASSOCIATED_TYPES = [\n int,\n str,\n bytes,\n dict,\n float,\n list,\n tuple,\n bool,\n ]\n\n def handle_input(self, data_type: Type[Any]) -> Any:\n \"\"\"Reads basic primitive types from json.\"\"\"\n super().handle_input(data_type)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n contents = yaml_utils.read_json(filepath)\n if type(contents) != data_type:\n # TODO [ENG-142]: Raise error or try to coerce\n logger.debug(\n f\"Contents {contents} was type {type(contents)} but expected \"\n f\"{data_type}\"\n )\n return contents\n\n def handle_return(self, data: Any) -> None:\n \"\"\"Handles basic built-in types and stores them as json\"\"\"\n super().handle_return(data)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n yaml_utils.write_json(filepath, data)\n", "path": "src/zenml/materializers/built_in_materializer.py"}]} | 1,284 | 150 |
gh_patches_debug_7770 | rasdani/github-patches | git_diff | pandas-dev__pandas-8238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: rolling_window yields unexpected results with win_type='triang'
Here's the example in the documentation, modified to have non-zero mean:
```
n = 100
ser = pandas.Series(randn(n)+10, index=pandas.date_range('1/1/2000', periods=n))
pandas.rolling_window(ser, 5, 'triang').plot()
pandas.rolling_window(ser, 5, 'boxcar').plot()
```
The rolling boxcar window is centered around 10, as expected.
The triang window is centered around 6. That suggests that the weights in the window don't add up to 1.
Either that or my expectation of how it should work is wrong?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/util/print_versions.py`
Content:
```
1 import os
2 import platform
3 import sys
4 import struct
5 import subprocess
6 import codecs
7
8
9 def get_sys_info():
10 "Returns system information as a dict"
11
12 blob = []
13
14 # get full commit hash
15 commit = None
16 if os.path.isdir(".git") and os.path.isdir("pandas"):
17 try:
18 pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
19 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
20 so, serr = pipe.communicate()
21 except:
22 pass
23 else:
24 if pipe.returncode == 0:
25 commit = so
26 try:
27 commit = so.decode('utf-8')
28 except ValueError:
29 pass
30 commit = commit.strip().strip('"')
31
32 blob.append(('commit', commit))
33
34 try:
35 sysname, nodename, release, version, machine, processor = platform.uname(
36 )
37 blob.extend([
38 ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
39 ("python-bits", struct.calcsize("P") * 8),
40 ("OS", "%s" % (sysname)),
41 ("OS-release", "%s" % (release)),
42 # ("Version", "%s" % (version)),
43 ("machine", "%s" % (machine)),
44 ("processor", "%s" % (processor)),
45 ("byteorder", "%s" % sys.byteorder),
46 ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
47 ("LANG", "%s" % os.environ.get('LANG', "None")),
48
49 ])
50 except:
51 pass
52
53 return blob
54
55
56 def show_versions(as_json=False):
57 import imp
58 sys_info = get_sys_info()
59
60 deps = [
61 # (MODULE_NAME, f(mod) -> mod version)
62 ("pandas", lambda mod: mod.__version__),
63 ("nose", lambda mod: mod.__version__),
64 ("Cython", lambda mod: mod.__version__),
65 ("numpy", lambda mod: mod.version.version),
66 ("scipy", lambda mod: mod.version.version),
67 ("statsmodels", lambda mod: mod.__version__),
68 ("IPython", lambda mod: mod.__version__),
69 ("sphinx", lambda mod: mod.__version__),
70 ("patsy", lambda mod: mod.__version__),
71 ("scikits.timeseries", lambda mod: mod.__version__),
72 ("dateutil", lambda mod: mod.__version__),
73 ("pytz", lambda mod: mod.VERSION),
74 ("bottleneck", lambda mod: mod.__version__),
75 ("tables", lambda mod: mod.__version__),
76 ("numexpr", lambda mod: mod.__version__),
77 ("matplotlib", lambda mod: mod.__version__),
78 ("openpyxl", lambda mod: mod.__version__),
79 ("xlrd", lambda mod: mod.__VERSION__),
80 ("xlwt", lambda mod: mod.__VERSION__),
81 ("xlsxwriter", lambda mod: mod.__version__),
82 ("lxml", lambda mod: mod.etree.__version__),
83 ("bs4", lambda mod: mod.__version__),
84 ("html5lib", lambda mod: mod.__version__),
85 ("httplib2", lambda mod: mod.__version__),
86 ("apiclient", lambda mod: mod.__version__),
87 ("rpy2", lambda mod: mod.__version__),
88 ("sqlalchemy", lambda mod: mod.__version__),
89 ("pymysql", lambda mod: mod.__version__),
90 ("psycopg2", lambda mod: mod.__version__),
91 ]
92
93 deps_blob = list()
94 for (modname, ver_f) in deps:
95 try:
96 try:
97 mod = imp.load_module(modname, *imp.find_module(modname))
98 except (ImportError):
99 import importlib
100 mod = importlib.import_module(modname)
101 ver = ver_f(mod)
102 deps_blob.append((modname, ver))
103 except:
104 deps_blob.append((modname, None))
105
106 if (as_json):
107 # 2.6-safe
108 try:
109 import json
110 except:
111 import simplejson as json
112
113 j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
114
115 if as_json == True:
116 print(j)
117 else:
118 with codecs.open(as_json, "wb", encoding='utf8') as f:
119 json.dump(j, f, indent=2)
120
121 else:
122
123 print("\nINSTALLED VERSIONS")
124 print("------------------")
125
126 for k, stat in sys_info:
127 print("%s: %s" % (k, stat))
128
129 print("")
130 for k, stat in deps_blob:
131 print("%s: %s" % (k, stat))
132
133
134 def main():
135 # optparse is 2.6-safe
136 from optparse import OptionParser
137 parser = OptionParser()
138 parser.add_option("-j", "--json", metavar="FILE", nargs=1,
139 help="Save output as JSON into file, pass in '-' to output to stdout")
140
141 (options, args) = parser.parse_args()
142
143 if options.json == "-":
144 options.json = True
145
146 show_versions(as_json=options.json)
147
148 return 0
149
150 if __name__ == "__main__":
151 sys.exit(main())
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -68,7 +68,6 @@
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
- ("scikits.timeseries", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("bottleneck", lambda mod: mod.__version__),
| {"golden_diff": "diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py\n--- a/pandas/util/print_versions.py\n+++ b/pandas/util/print_versions.py\n@@ -68,7 +68,6 @@\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n- (\"scikits.timeseries\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: mod.__version__),\n", "issue": "BUG: rolling_window yields unexpected results with win_type='triang'\nHere's the example in the documentation, modified to have non-zero mean:\n\n```\nn = 100\nser = pandas.Series(randn(n)+10, index=pandas.date_range('1/1/2000', periods=n))\npandas.rolling_window(ser, 5, 'triang').plot()\npandas.rolling_window(ser, 5, 'boxcar').plot()\n```\n\nThe rolling boxcar window is centered around 10, as expected.\n\nThe triang window is centered around 6. That suggests that the weights in the window don't add up to 1.\n\nEither that or my expectation of how it should work is wrong?\n\n", "before_files": [{"content": "import os\nimport platform\nimport sys\nimport struct\nimport subprocess\nimport codecs\n\n\ndef get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") and os.path.isdir(\"pandas\"):\n try:\n pipe = subprocess.Popen('git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, serr = pipe.communicate()\n except:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode('utf-8')\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append(('commit', commit))\n\n try:\n sysname, nodename, release, version, machine, processor = platform.uname(\n )\n blob.extend([\n (\"python\", \"%d.%d.%d.%s.%s\" % sys.version_info[:]),\n (\"python-bits\", struct.calcsize(\"P\") * 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get('LC_ALL', \"None\")),\n (\"LANG\", \"%s\" % os.environ.get('LANG', \"None\")),\n\n ])\n except:\n pass\n\n return blob\n\n\ndef show_versions(as_json=False):\n import imp\n sys_info = get_sys_info()\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"pandas\", lambda mod: mod.__version__),\n (\"nose\", lambda mod: mod.__version__),\n (\"Cython\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.version.version),\n (\"scipy\", lambda mod: mod.version.version),\n (\"statsmodels\", lambda mod: mod.__version__),\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n (\"scikits.timeseries\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: mod.__version__),\n (\"tables\", lambda mod: mod.__version__),\n (\"numexpr\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"openpyxl\", lambda mod: mod.__version__),\n (\"xlrd\", lambda mod: mod.__VERSION__),\n (\"xlwt\", lambda mod: mod.__VERSION__),\n (\"xlsxwriter\", lambda mod: mod.__version__),\n (\"lxml\", lambda mod: mod.etree.__version__),\n (\"bs4\", lambda mod: mod.__version__),\n (\"html5lib\", 
lambda mod: mod.__version__),\n (\"httplib2\", lambda mod: mod.__version__),\n (\"apiclient\", lambda mod: mod.__version__),\n (\"rpy2\", lambda mod: mod.__version__),\n (\"sqlalchemy\", lambda mod: mod.__version__),\n (\"pymysql\", lambda mod: mod.__version__),\n (\"psycopg2\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n try:\n mod = imp.load_module(modname, *imp.find_module(modname))\n except (ImportError):\n import importlib\n mod = importlib.import_module(modname)\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except:\n deps_blob.append((modname, None))\n\n if (as_json):\n # 2.6-safe\n try:\n import json\n except:\n import simplejson as json\n\n j = dict(system=dict(sys_info), dependencies=dict(deps_blob))\n\n if as_json == True:\n print(j)\n else:\n with codecs.open(as_json, \"wb\", encoding='utf8') as f:\n json.dump(j, f, indent=2)\n\n else:\n\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat))\n\n print(\"\")\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat))\n\n\ndef main():\n # optparse is 2.6-safe\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option(\"-j\", \"--json\", metavar=\"FILE\", nargs=1,\n help=\"Save output as JSON into file, pass in '-' to output to stdout\")\n\n (options, args) = parser.parse_args()\n\n if options.json == \"-\":\n options.json = True\n\n show_versions(as_json=options.json)\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "pandas/util/print_versions.py"}], "after_files": [{"content": "import os\nimport platform\nimport sys\nimport struct\nimport subprocess\nimport codecs\n\n\ndef get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") and os.path.isdir(\"pandas\"):\n try:\n pipe = subprocess.Popen('git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, serr = pipe.communicate()\n except:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode('utf-8')\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append(('commit', commit))\n\n try:\n sysname, nodename, release, version, machine, processor = platform.uname(\n )\n blob.extend([\n (\"python\", \"%d.%d.%d.%s.%s\" % sys.version_info[:]),\n (\"python-bits\", struct.calcsize(\"P\") * 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get('LC_ALL', \"None\")),\n (\"LANG\", \"%s\" % os.environ.get('LANG', \"None\")),\n\n ])\n except:\n pass\n\n return blob\n\n\ndef show_versions(as_json=False):\n import imp\n sys_info = get_sys_info()\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"pandas\", lambda mod: mod.__version__),\n (\"nose\", lambda mod: mod.__version__),\n (\"Cython\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.version.version),\n (\"scipy\", lambda mod: mod.version.version),\n (\"statsmodels\", lambda mod: mod.__version__),\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: 
mod.__version__),\n (\"tables\", lambda mod: mod.__version__),\n (\"numexpr\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"openpyxl\", lambda mod: mod.__version__),\n (\"xlrd\", lambda mod: mod.__VERSION__),\n (\"xlwt\", lambda mod: mod.__VERSION__),\n (\"xlsxwriter\", lambda mod: mod.__version__),\n (\"lxml\", lambda mod: mod.etree.__version__),\n (\"bs4\", lambda mod: mod.__version__),\n (\"html5lib\", lambda mod: mod.__version__),\n (\"httplib2\", lambda mod: mod.__version__),\n (\"apiclient\", lambda mod: mod.__version__),\n (\"rpy2\", lambda mod: mod.__version__),\n (\"sqlalchemy\", lambda mod: mod.__version__),\n (\"pymysql\", lambda mod: mod.__version__),\n (\"psycopg2\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n try:\n mod = imp.load_module(modname, *imp.find_module(modname))\n except (ImportError):\n import importlib\n mod = importlib.import_module(modname)\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except:\n deps_blob.append((modname, None))\n\n if (as_json):\n # 2.6-safe\n try:\n import json\n except:\n import simplejson as json\n\n j = dict(system=dict(sys_info), dependencies=dict(deps_blob))\n\n if as_json == True:\n print(j)\n else:\n with codecs.open(as_json, \"wb\", encoding='utf8') as f:\n json.dump(j, f, indent=2)\n\n else:\n\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat))\n\n print(\"\")\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat))\n\n\ndef main():\n # optparse is 2.6-safe\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option(\"-j\", \"--json\", metavar=\"FILE\", nargs=1,\n help=\"Save output as JSON into file, pass in '-' to output to stdout\")\n\n (options, args) = parser.parse_args()\n\n if options.json == \"-\":\n options.json = True\n\n show_versions(as_json=options.json)\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "pandas/util/print_versions.py"}]} | 1,923 | 143 |
gh_patches_debug_37926 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5842 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Samsonite spider finds dealers, not official stores
This spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand
E.g., this is Chillout Travel Store, not a Samsonite store
https://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257
The website does list official stores and dealers separately, so it should be possible to import the right type?
https://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20
_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/samsonite_eu.py`
Content:
```
1 import scrapy
2 import xmltodict
3
4 from locations.dict_parser import DictParser
5
6
7 class SamsoniteEuSpider(scrapy.Spider):
8 name = "samsonite_eu"
9 item_attributes = {
10 "brand": "Samsonite",
11 "brand_wikidata": "Q1203426",
12 }
13 allowed_domains = ["samsonite.com"]
14
15 def start_requests(self):
16 country_eu = [
17 "AL",
18 "CZ",
19 "DE",
20 "DK",
21 "CY",
22 "AT",
23 "BE",
24 "BG",
25 "CH",
26 "EE",
27 "EL",
28 "ES",
29 "FI",
30 "FR",
31 "HR",
32 "HU",
33 "IE",
34 "IS",
35 "IT",
36 "LT",
37 "LU",
38 "NL",
39 "NO",
40 "LV",
41 "ME",
42 "MT",
43 "MK",
44 "LI",
45 "PL",
46 "SI",
47 "SK",
48 "TR",
49 "UK",
50 "RS",
51 "SE",
52 "PT",
53 "RO",
54 ]
55 template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
56 for country in country_eu:
57 yield scrapy.Request(url=template.format(country), callback=self.parse)
58
59 def parse(self, response):
60 data = xmltodict.parse(response.text)
61 if data.get("dealers"):
62 stores = data.get("dealers", {}).get("dealer")
63 stores = stores if type(stores) == list else [stores]
64 for store in stores:
65 item = DictParser.parse(store)
66 item["ref"] = store.get("fld_Deal_Id")
67 item["street_address"] = store.get("fld_Deal_Address1")
68 item["city"] = store.get("fld_Deal_City1")
69 item["postcode"] = store.get("fld_Deal_Zip")
70 item["country"] = store.get("fld_Coun_Name")
71 item["phone"] = store.get("fld_Deal_Phone")
72 item["email"] = store.get("fld_Deal_Email")
73
74 yield item
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py
--- a/locations/spiders/samsonite_eu.py
+++ b/locations/spiders/samsonite_eu.py
@@ -1,15 +1,13 @@
import scrapy
import xmltodict
-from locations.dict_parser import DictParser
+from locations.items import Feature, add_social_media
class SamsoniteEuSpider(scrapy.Spider):
name = "samsonite_eu"
- item_attributes = {
- "brand": "Samsonite",
- "brand_wikidata": "Q1203426",
- }
+ CHIC_ACCENT = {"brand": "Chic Accent"}
+ SAMSONITE = {"brand": "Samsonite", "brand_wikidata": "Q1203426"}
allowed_domains = ["samsonite.com"]
def start_requests(self):
@@ -51,6 +49,7 @@
"SE",
"PT",
"RO",
+ "GB",
]
template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
for country in country_eu:
@@ -62,13 +61,31 @@
stores = data.get("dealers", {}).get("dealer")
stores = stores if type(stores) == list else [stores]
for store in stores:
- item = DictParser.parse(store)
+ if store["fld_Deal_DeCl_ID"] != "9":
+ continue
+ item = Feature()
+ item["lat"] = store["Latitude"]
+ item["lon"] = store["Longitude"]
item["ref"] = store.get("fld_Deal_Id")
item["street_address"] = store.get("fld_Deal_Address1")
item["city"] = store.get("fld_Deal_City1")
item["postcode"] = store.get("fld_Deal_Zip")
item["country"] = store.get("fld_Coun_Name")
- item["phone"] = store.get("fld_Deal_Phone")
- item["email"] = store.get("fld_Deal_Email")
+ item["email"] = store.get("fld_Deal_Email") or ""
+ item["website"] = store["fld_Deal_DetailPageUrl"]
+
+ if "chicaccent.com" in item["email"]:
+ item.update(self.CHIC_ACCENT)
+ else:
+ item.update(self.SAMSONITE)
+
+ if phone := store.get("fld_Deal_Phone"):
+ phone = store["fld_Deal_Prefix"] + phone.lower()
+
+ if "whatsapp" in phone:
+ phone, whats_app = phone.split("whatsapp")
+ add_social_media(item, "WhatsApp", whats_app.strip(" :"))
+
+ item["phone"] = phone
yield item
| {"golden_diff": "diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py\n--- a/locations/spiders/samsonite_eu.py\n+++ b/locations/spiders/samsonite_eu.py\n@@ -1,15 +1,13 @@\n import scrapy\n import xmltodict\n \n-from locations.dict_parser import DictParser\n+from locations.items import Feature, add_social_media\n \n \n class SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n- item_attributes = {\n- \"brand\": \"Samsonite\",\n- \"brand_wikidata\": \"Q1203426\",\n- }\n+ CHIC_ACCENT = {\"brand\": \"Chic Accent\"}\n+ SAMSONITE = {\"brand\": \"Samsonite\", \"brand_wikidata\": \"Q1203426\"}\n allowed_domains = [\"samsonite.com\"]\n \n def start_requests(self):\n@@ -51,6 +49,7 @@\n \"SE\",\n \"PT\",\n \"RO\",\n+ \"GB\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n@@ -62,13 +61,31 @@\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n- item = DictParser.parse(store)\n+ if store[\"fld_Deal_DeCl_ID\"] != \"9\":\n+ continue\n+ item = Feature()\n+ item[\"lat\"] = store[\"Latitude\"]\n+ item[\"lon\"] = store[\"Longitude\"]\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n- item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n- item[\"email\"] = store.get(\"fld_Deal_Email\")\n+ item[\"email\"] = store.get(\"fld_Deal_Email\") or \"\"\n+ item[\"website\"] = store[\"fld_Deal_DetailPageUrl\"]\n+\n+ if \"chicaccent.com\" in item[\"email\"]:\n+ item.update(self.CHIC_ACCENT)\n+ else:\n+ item.update(self.SAMSONITE)\n+\n+ if phone := store.get(\"fld_Deal_Phone\"):\n+ phone = store[\"fld_Deal_Prefix\"] + phone.lower()\n+\n+ if \"whatsapp\" in phone:\n+ phone, whats_app = phone.split(\"whatsapp\")\n+ add_social_media(item, \"WhatsApp\", whats_app.strip(\" :\"))\n+\n+ item[\"phone\"] = phone\n \n yield item\n", "issue": "Samsonite spider finds dealers, not official stores\nThis spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand\r\n\r\nE.g., this is Chillout Travel Store, not a Samsonite store\r\nhttps://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257\r\n\r\nThe website does list official stores and dealers separately, so it should be possible to import the right type?\r\nhttps://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20\r\n\r\n_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_\r\n \n", "before_files": [{"content": "import scrapy\nimport xmltodict\n\nfrom locations.dict_parser import DictParser\n\n\nclass SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n item_attributes = {\n \"brand\": \"Samsonite\",\n \"brand_wikidata\": \"Q1203426\",\n }\n allowed_domains = [\"samsonite.com\"]\n\n def start_requests(self):\n country_eu = [\n \"AL\",\n \"CZ\",\n \"DE\",\n \"DK\",\n \"CY\",\n \"AT\",\n \"BE\",\n \"BG\",\n \"CH\",\n \"EE\",\n \"EL\",\n \"ES\",\n \"FI\",\n \"FR\",\n \"HR\",\n \"HU\",\n \"IE\",\n \"IS\",\n \"IT\",\n \"LT\",\n \"LU\",\n \"NL\",\n \"NO\",\n \"LV\",\n \"ME\",\n \"MT\",\n \"MK\",\n \"LI\",\n 
\"PL\",\n \"SI\",\n \"SK\",\n \"TR\",\n \"UK\",\n \"RS\",\n \"SE\",\n \"PT\",\n \"RO\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n yield scrapy.Request(url=template.format(country), callback=self.parse)\n\n def parse(self, response):\n data = xmltodict.parse(response.text)\n if data.get(\"dealers\"):\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n item = DictParser.parse(store)\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n item[\"email\"] = store.get(\"fld_Deal_Email\")\n\n yield item\n", "path": "locations/spiders/samsonite_eu.py"}], "after_files": [{"content": "import scrapy\nimport xmltodict\n\nfrom locations.items import Feature, add_social_media\n\n\nclass SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n CHIC_ACCENT = {\"brand\": \"Chic Accent\"}\n SAMSONITE = {\"brand\": \"Samsonite\", \"brand_wikidata\": \"Q1203426\"}\n allowed_domains = [\"samsonite.com\"]\n\n def start_requests(self):\n country_eu = [\n \"AL\",\n \"CZ\",\n \"DE\",\n \"DK\",\n \"CY\",\n \"AT\",\n \"BE\",\n \"BG\",\n \"CH\",\n \"EE\",\n \"EL\",\n \"ES\",\n \"FI\",\n \"FR\",\n \"HR\",\n \"HU\",\n \"IE\",\n \"IS\",\n \"IT\",\n \"LT\",\n \"LU\",\n \"NL\",\n \"NO\",\n \"LV\",\n \"ME\",\n \"MT\",\n \"MK\",\n \"LI\",\n \"PL\",\n \"SI\",\n \"SK\",\n \"TR\",\n \"UK\",\n \"RS\",\n \"SE\",\n \"PT\",\n \"RO\",\n \"GB\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n yield scrapy.Request(url=template.format(country), callback=self.parse)\n\n def parse(self, response):\n data = xmltodict.parse(response.text)\n if data.get(\"dealers\"):\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n if store[\"fld_Deal_DeCl_ID\"] != \"9\":\n continue\n item = Feature()\n item[\"lat\"] = store[\"Latitude\"]\n item[\"lon\"] = store[\"Longitude\"]\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n item[\"email\"] = store.get(\"fld_Deal_Email\") or \"\"\n item[\"website\"] = store[\"fld_Deal_DetailPageUrl\"]\n\n if \"chicaccent.com\" in item[\"email\"]:\n item.update(self.CHIC_ACCENT)\n else:\n item.update(self.SAMSONITE)\n\n if phone := store.get(\"fld_Deal_Phone\"):\n phone = store[\"fld_Deal_Prefix\"] + phone.lower()\n\n if \"whatsapp\" in phone:\n phone, whats_app = phone.split(\"whatsapp\")\n add_social_media(item, \"WhatsApp\", whats_app.strip(\" :\"))\n\n item[\"phone\"] = phone\n\n yield item\n", "path": "locations/spiders/samsonite_eu.py"}]} | 1,149 | 699 |
gh_patches_debug_25598 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3459 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider public_storage is broken
During the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/public_storage.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6 from locations.hours import OpeningHours
7
8
9 class PublicStorageSpider(scrapy.Spider):
10 name = "public_storage"
11 item_attributes = { 'brand': "Public Storage" }
12 allowed_domains = ["www.publicstorage.com"]
13 start_urls = (
14 'https://www.publicstorage.com/sitemap_plp.xml',
15 )
16
17 def parse(self, response):
18 response.selector.remove_namespaces()
19 city_urls = response.xpath('//url/loc/text()').extract()
20 for path in city_urls:
21 yield scrapy.Request(
22 path.strip(),
23 callback=self.parse_store,
24 )
25
26 def parse_hours(self, hours):
27 opening_hours = OpeningHours()
28
29 for hour in hours:
30 for day in hour['dayOfWeek']:
31 opening_hours.add_range(
32 day=day[:2],
33 open_time=hour["opens"],
34 close_time=hour["closes"],
35 )
36
37 return opening_hours.as_opening_hours()
38
39 def parse_store(self, response):
40 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
41 data = data['@graph'][0]
42
43 properties = {
44 "ref": data['@id'],
45 "opening_hours": self.parse_hours(data['openingHoursSpecification']),
46 "addr_full": data['address']['streetAddress'],
47 "city": data['address']['addressLocality'],
48 "state": data['address']['addressRegion'],
49 "postcode": data['address']['postalCode'],
50 "phone": data['telephone'],
51 "lat": data['geo']['latitude'],
52 "lon": data['geo']['longitude'],
53 }
54
55 yield GeojsonPointItem(**properties)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py
--- a/locations/spiders/public_storage.py
+++ b/locations/spiders/public_storage.py
@@ -20,9 +20,13 @@
for path in city_urls:
yield scrapy.Request(
path.strip(),
- callback=self.parse_store,
+ callback=self.load_store,
)
+ def load_store(self, response):
+ ldjson = response.xpath('//link[@type="application/ld+json"]/@href').get()
+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)
+
def parse_hours(self, hours):
opening_hours = OpeningHours()
@@ -37,11 +41,11 @@
return opening_hours.as_opening_hours()
def parse_store(self, response):
- data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
- data = data['@graph'][0]
+ data = response.json()['@graph'][0]
properties = {
"ref": data['@id'],
+ "website": data['url'],
"opening_hours": self.parse_hours(data['openingHoursSpecification']),
"addr_full": data['address']['streetAddress'],
"city": data['address']['addressLocality'],
| {"golden_diff": "diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py\n--- a/locations/spiders/public_storage.py\n+++ b/locations/spiders/public_storage.py\n@@ -20,9 +20,13 @@\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n- callback=self.parse_store,\n+ callback=self.load_store,\n )\n \n+ def load_store(self, response):\n+ ldjson = response.xpath('//link[@type=\"application/ld+json\"]/@href').get()\n+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)\n+\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n \n@@ -37,11 +41,11 @@\n return opening_hours.as_opening_hours()\n \n def parse_store(self, response):\n- data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n- data = data['@graph'][0]\n+ data = response.json()['@graph'][0]\n \n properties = {\n \"ref\": data['@id'],\n+ \"website\": data['url'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n", "issue": "Spider public_storage is broken\nDuring the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n data = data['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n 
allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.load_store,\n )\n\n def load_store(self, response):\n ldjson = response.xpath('//link[@type=\"application/ld+json\"]/@href').get()\n yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = response.json()['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"website\": data['url'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}]} | 926 | 292 |
gh_patches_debug_22879 | rasdani/github-patches | git_diff | techmatters__terraso-backend-1223 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ability to change user password from Django admin
## Description
Add ability to change user password from Django admin.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/core/admin.py`
Content:
```
1 # Copyright © 2021-2023 Technology Matters
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License as published
5 # by the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU Affero General Public License for more details.
12 #
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see https://www.gnu.org/licenses/.
15
16 from django.contrib import admin
17
18 from .models import (
19 Group,
20 Landscape,
21 LandscapeDevelopmentStrategy,
22 LandscapeGroup,
23 SharedResource,
24 TaxonomyTerm,
25 User,
26 UserPreference,
27 )
28
29
30 @admin.register(Group)
31 class GroupAdmin(admin.ModelAdmin):
32 list_display = ("name", "slug", "website", "created_at")
33
34 def get_queryset(self, request):
35 qs = super().get_queryset(request)
36 return qs.exclude(associated_landscapes__is_default_landscape_group=True)
37
38
39 @admin.register(Landscape)
40 class LandscapeAdmin(admin.ModelAdmin):
41 list_display = ("name", "slug", "location", "website", "created_at")
42 raw_id_fields = ("membership_list",)
43
44
45 class LandscapeDefaultGroup(Group):
46 class Meta:
47 proxy = True
48
49
50 @admin.register(LandscapeGroup)
51 class LandscapeGroupAdmin(admin.ModelAdmin):
52 list_display = ("landscape", "group")
53
54
55 class UserPreferenceInline(admin.TabularInline):
56 model = UserPreference
57
58
59 @admin.register(User)
60 class UserAdmin(admin.ModelAdmin):
61 list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
62 inlines = [UserPreferenceInline]
63
64
65 @admin.register(TaxonomyTerm)
66 class TaxonomyTermAdmin(admin.ModelAdmin):
67 list_display = ("value_original", "type", "value_en", "value_es")
68
69
70 @admin.register(LandscapeDevelopmentStrategy)
71 class LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):
72 list_display = ("id", "landscape")
73
74
75 @admin.register(SharedResource)
76 class SharedResourceAdmin(admin.ModelAdmin):
77 list_display = ("id", "share_uuid", "share_access")
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py
--- a/terraso_backend/apps/core/admin.py
+++ b/terraso_backend/apps/core/admin.py
@@ -14,6 +14,7 @@
# along with this program. If not, see https://www.gnu.org/licenses/.
from django.contrib import admin
+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import (
Group,
@@ -57,9 +58,28 @@
@admin.register(User)
-class UserAdmin(admin.ModelAdmin):
+class UserAdmin(DjangoUserAdmin):
+ ordering = ("email",)
list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
+ search_fields = ("email", "first_name", "last_name")
inlines = [UserPreferenceInline]
+ fieldsets = (
+ (None, {"fields": ("email", "password")}),
+ ("Personal info", {"fields": ("first_name", "last_name")}),
+ (
+ "Permissions",
+ {
+ "fields": (
+ "is_active",
+ "is_staff",
+ "is_superuser",
+ "groups",
+ "user_permissions",
+ ),
+ },
+ ),
+ ("Important dates", {"fields": ("last_login", "date_joined")}),
+ )
@admin.register(TaxonomyTerm)
| {"golden_diff": "diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py\n--- a/terraso_backend/apps/core/admin.py\n+++ b/terraso_backend/apps/core/admin.py\n@@ -14,6 +14,7 @@\n # along with this program. If not, see https://www.gnu.org/licenses/.\n \n from django.contrib import admin\n+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n \n from .models import (\n Group,\n@@ -57,9 +58,28 @@\n \n \n @admin.register(User)\n-class UserAdmin(admin.ModelAdmin):\n+class UserAdmin(DjangoUserAdmin):\n+ ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n+ search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n+ fieldsets = (\n+ (None, {\"fields\": (\"email\", \"password\")}),\n+ (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n+ (\n+ \"Permissions\",\n+ {\n+ \"fields\": (\n+ \"is_active\",\n+ \"is_staff\",\n+ \"is_superuser\",\n+ \"groups\",\n+ \"user_permissions\",\n+ ),\n+ },\n+ ),\n+ (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n+ )\n \n \n @admin.register(TaxonomyTerm)\n", "issue": "Add ability to change user password from Django admin\n## Description\r\nAdd ability to change user password from Django admin.\n", "before_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n inlines = [UserPreferenceInline]\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}], "after_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(DjangoUserAdmin):\n ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n fieldsets = (\n (None, {\"fields\": (\"email\", \"password\")}),\n (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n ),\n },\n ),\n (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n )\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}]} | 944 | 321 |
gh_patches_debug_26602 | rasdani/github-patches | git_diff | docker__docker-py-1263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build Image Missing Arguments
The build image function is missing some arguments that are present in the v1.24 api.
- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.
- labels – JSON map of string pairs for labels to set on the image.
See: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/build.py`
Content:
```
1 import logging
2 import os
3 import re
4 import json
5
6 from .. import constants
7 from .. import errors
8 from .. import auth
9 from .. import utils
10
11
12 log = logging.getLogger(__name__)
13
14
15 class BuildApiMixin(object):
16 def build(self, path=None, tag=None, quiet=False, fileobj=None,
17 nocache=False, rm=False, stream=False, timeout=None,
18 custom_context=False, encoding=None, pull=False,
19 forcerm=False, dockerfile=None, container_limits=None,
20 decode=False, buildargs=None, gzip=False):
21 remote = context = None
22 headers = {}
23 container_limits = container_limits or {}
24 if path is None and fileobj is None:
25 raise TypeError("Either path or fileobj needs to be provided.")
26 if gzip and encoding is not None:
27 raise errors.DockerException(
28 'Can not use custom encoding if gzip is enabled'
29 )
30
31 for key in container_limits.keys():
32 if key not in constants.CONTAINER_LIMITS_KEYS:
33 raise errors.DockerException(
34 'Invalid container_limits key {0}'.format(key)
35 )
36
37 if custom_context:
38 if not fileobj:
39 raise TypeError("You must specify fileobj with custom_context")
40 context = fileobj
41 elif fileobj is not None:
42 context = utils.mkbuildcontext(fileobj)
43 elif path.startswith(('http://', 'https://',
44 'git://', 'github.com/', 'git@')):
45 remote = path
46 elif not os.path.isdir(path):
47 raise TypeError("You must specify a directory to build in path")
48 else:
49 dockerignore = os.path.join(path, '.dockerignore')
50 exclude = None
51 if os.path.exists(dockerignore):
52 with open(dockerignore, 'r') as f:
53 exclude = list(filter(bool, f.read().splitlines()))
54 context = utils.tar(
55 path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
56 )
57 encoding = 'gzip' if gzip else encoding
58
59 if utils.compare_version('1.8', self._version) >= 0:
60 stream = True
61
62 if dockerfile and utils.compare_version('1.17', self._version) < 0:
63 raise errors.InvalidVersion(
64 'dockerfile was only introduced in API version 1.17'
65 )
66
67 if utils.compare_version('1.19', self._version) < 0:
68 pull = 1 if pull else 0
69
70 u = self._url('/build')
71 params = {
72 't': tag,
73 'remote': remote,
74 'q': quiet,
75 'nocache': nocache,
76 'rm': rm,
77 'forcerm': forcerm,
78 'pull': pull,
79 'dockerfile': dockerfile,
80 }
81 params.update(container_limits)
82
83 if buildargs:
84 if utils.version_gte(self._version, '1.21'):
85 params.update({'buildargs': json.dumps(buildargs)})
86 else:
87 raise errors.InvalidVersion(
88 'buildargs was only introduced in API version 1.21'
89 )
90
91 if context is not None:
92 headers = {'Content-Type': 'application/tar'}
93 if encoding:
94 headers['Content-Encoding'] = encoding
95
96 if utils.compare_version('1.9', self._version) >= 0:
97 self._set_auth_headers(headers)
98
99 response = self._post(
100 u,
101 data=context,
102 params=params,
103 headers=headers,
104 stream=stream,
105 timeout=timeout,
106 )
107
108 if context is not None and not custom_context:
109 context.close()
110
111 if stream:
112 return self._stream_helper(response, decode=decode)
113 else:
114 output = self._result(response)
115 srch = r'Successfully built ([0-9a-f]+)'
116 match = re.search(srch, output)
117 if not match:
118 return None, output
119 return match.group(1), output
120
121 def _set_auth_headers(self, headers):
122 log.debug('Looking for auth config')
123
124 # If we don't have any auth data so far, try reloading the config
125 # file one more time in case anything showed up in there.
126 if not self._auth_configs:
127 log.debug("No auth config in memory - loading from filesystem")
128 self._auth_configs = auth.load_config()
129
130 # Send the full auth configuration (if any exists), since the build
131 # could use any (or all) of the registries.
132 if self._auth_configs:
133 log.debug(
134 'Sending auth config ({0})'.format(
135 ', '.join(repr(k) for k in self._auth_configs.keys())
136 )
137 )
138
139 if utils.compare_version('1.19', self._version) >= 0:
140 headers['X-Registry-Config'] = auth.encode_header(
141 self._auth_configs
142 )
143 else:
144 headers['X-Registry-Config'] = auth.encode_header({
145 'configs': self._auth_configs
146 })
147 else:
148 log.debug('No auth config found')
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/build.py b/docker/api/build.py
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -17,7 +17,8 @@
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False, buildargs=None, gzip=False):
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None):
remote = context = None
headers = {}
container_limits = container_limits or {}
@@ -88,6 +89,22 @@
'buildargs was only introduced in API version 1.21'
)
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
| {"golden_diff": "diff --git a/docker/api/build.py b/docker/api/build.py\n--- a/docker/api/build.py\n+++ b/docker/api/build.py\n@@ -17,7 +17,8 @@\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n- decode=False, buildargs=None, gzip=False):\n+ decode=False, buildargs=None, gzip=False, shmsize=None,\n+ labels=None):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n@@ -88,6 +89,22 @@\n 'buildargs was only introduced in API version 1.21'\n )\n \n+ if shmsize:\n+ if utils.version_gte(self._version, '1.22'):\n+ params.update({'shmsize': shmsize})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'shmsize was only introduced in API version 1.22'\n+ )\n+\n+ if labels:\n+ if utils.version_gte(self._version, '1.23'):\n+ params.update({'labels': json.dumps(labels)})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'labels was only introduced in API version 1.23'\n+ )\n+\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n", "issue": "Build Image Missing Arguments\nThe build image function is missing some arguments that are present in the v1.24 api.\n- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.\n- labels \u2013 JSON map of string pairs for labels to set on the image.\n\nSee: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile\n\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport json\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import auth\nfrom .. import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', 
self._version) < 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in self._auth_configs.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n self._auth_configs\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': self._auth_configs\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}], "after_files": [{"content": "import logging\nimport os\nimport re\nimport json\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import auth\nfrom .. 
import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False, shmsize=None,\n labels=None):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) < 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if shmsize:\n if utils.version_gte(self._version, '1.22'):\n params.update({'shmsize': shmsize})\n else:\n raise errors.InvalidVersion(\n 'shmsize was only introduced in API version 1.22'\n )\n\n if labels:\n if utils.version_gte(self._version, '1.23'):\n params.update({'labels': json.dumps(labels)})\n else:\n raise errors.InvalidVersion(\n 'labels was only introduced in API version 1.23'\n )\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth 
data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in self._auth_configs.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n self._auth_configs\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': self._auth_configs\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}]} | 1,793 | 321 |
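For the docker-py record that ends above, a minimal usage sketch of the two build arguments its golden diff introduces (`shmsize` and `labels`). This is an illustration, not part of the record: it assumes a reachable Docker daemon and a docker-py release that already contains the change; the image tag, label values and Dockerfile are made up.

```python
# Sketch only: exercising the `shmsize` and `labels` arguments added to
# APIClient.build() in the record above. Assumes docker-py >= 2.x (where the
# low-level client is docker.APIClient) and a local Docker daemon; the tag and
# label values are hypothetical.
import io

import docker

dockerfile = io.BytesIO(b'FROM busybox\nCMD ["true"]\n')

client = docker.APIClient(base_url="unix://var/run/docker.sock")
stream = client.build(
    fileobj=dockerfile,
    tag="shmsize-demo:latest",
    shmsize=134217728,              # 128 MiB /dev/shm, forwarded as the `shmsize` query parameter
    labels={"maintainer": "demo"},  # serialised with json.dumps into the `labels` parameter
    decode=True,
)

for chunk in stream:
    # Each chunk is one decoded JSON object from the daemon's build output stream.
    print(chunk.get("stream", "").rstrip())
```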
gh_patches_debug_27671 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
From django administration page, in Portal, can't access Teachers or Students
Trying to access a Student or Teacher from the administration page leads to an error:
Failed to load resource: the server responded with a status of 500 (OK)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `portal/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2016, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48
49
50 class SchoolAdmin(admin.ModelAdmin):
51 search_fields = ['name', 'country', 'postcode', 'town']
52 list_filter = ['postcode', 'country']
53
54
55 class StudentAdmin(admin.ModelAdmin):
56 search_fields = ['new_user__first_name', 'new_user__last_name']
57 list_filter = ['class_field', 'class_field__teacher']
58
59
60 class TeacherAdmin(admin.ModelAdmin):
61 search_fields = ['new_user__first_name', 'new_user__last_name']
62 list_filter = ['school']
63
64
65 class UserProfileAdmin(admin.ModelAdmin):
66 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
67 list_filter = ['user__date_joined']
68 list_display = ['user', 'joined_recently']
69
70
71 class EmailVerificationAdmin(admin.ModelAdmin):
72 search_fields = ['new_user']
73
74
75 UserAdmin.list_display += ('date_joined',)
76 UserAdmin.list_filter += ('date_joined',)
77
78
79 admin.site.register(Class, ClassAdmin)
80 admin.site.register(Student, StudentAdmin)
81 admin.site.register(Guardian)
82 admin.site.register(Teacher, TeacherAdmin)
83 admin.site.register(School, SchoolAdmin)
84 admin.site.unregister(User)
85 admin.site.register(User, UserAdmin)
86 admin.site.register(UserProfile, UserProfileAdmin)
87 admin.site.register(FrontPageNews)
88 admin.site.register(EmailVerification, EmailVerificationAdmin)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -45,6 +45,7 @@
class ClassAdmin(admin.ModelAdmin):
search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
list_filter = ['teacher']
+ readonly_fields = ['teacher']
class SchoolAdmin(admin.ModelAdmin):
@@ -55,17 +56,22 @@
class StudentAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['class_field', 'class_field__teacher']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['class_field', 'pending_class_request']
class TeacherAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['school']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['school', 'pending_join_request']
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
+ readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
| {"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -45,6 +45,7 @@\n class ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n+ readonly_fields = ['teacher']\n \n \n class SchoolAdmin(admin.ModelAdmin):\n@@ -55,17 +56,22 @@\n class StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['class_field', 'pending_class_request']\n \n \n class TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['school', 'pending_join_request']\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n+ readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n", "issue": "From django administration page, in Portal, can't access Teachers or Students\nTrying to access a Student or Teacher from the administration page leads to an error:\nFailed to load resource: the server responded with a status of 500 (OK)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. 
You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]} | 1,266 | 303 |
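A short sketch of the admin pattern applied by the golden diff in the record above: user links are marked read-only and foreign keys switched to raw-id widgets so the change form no longer renders a select box over every related row. A common cause of this kind of admin 500 is exactly that default widget behaviour on large tables. The `Student` model and field names below are hypothetical stand-ins, not the portal's real models.

```python
# Minimal sketch of the fix pattern from the record above, on a hypothetical model.
from django.contrib import admin

from myapp.models import Student  # hypothetical app and model


@admin.register(Student)
class StudentAdmin(admin.ModelAdmin):
    search_fields = ["new_user__first_name", "new_user__last_name"]
    list_filter = ["class_field"]
    readonly_fields = ["user", "new_user"]  # rendered as plain text, no giant <select> of users
    raw_id_fields = ["class_field"]         # text input plus lookup popup instead of a full dropdown
```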
gh_patches_debug_27236 | rasdani/github-patches | git_diff | redis__redis-py-2324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for WITHSUFFIXTRIE to FT.CREATE
RediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/commands/search/field.py`
Content:
```
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108 CASESENSITIVE = "CASESENSITIVE"
109
110 def __init__(
111 self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
112 ):
113 args = [Field.TAG, self.SEPARATOR, separator]
114 if case_sensitive:
115 args.append(self.CASESENSITIVE)
116
117 Field.__init__(self, name, args=args, **kwargs)
118
119
120 class VectorField(Field):
121 """
122 Allows vector similarity queries against the value in this attribute.
123 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
124 """
125
126 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
127 """
128 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
129 although it's also a Field.
130
131 ``name`` is the name of the field.
132
133 ``algorithm`` can be "FLAT" or "HNSW".
134
135 ``attributes`` each algorithm can have specific attributes. Some of them
136 are mandatory and some of them are optional. See
137 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
138 for more information.
139 """
140 sort = kwargs.get("sortable", False)
141 noindex = kwargs.get("no_index", False)
142
143 if sort or noindex:
144 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
145
146 if algorithm.upper() not in ["FLAT", "HNSW"]:
147 raise DataError(
148 "Realtime vector indexing supporting 2 Indexing Methods:"
149 "'FLAT' and 'HNSW'."
150 )
151
152 attr_li = []
153
154 for key, value in attributes.items():
155 attr_li.extend([key, value])
156
157 Field.__init__(
158 self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
159 )
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -64,6 +64,7 @@
weight: float = 1.0,
no_stem: bool = False,
phonetic_matcher: str = None,
+ withsuffixtrie: bool = False,
**kwargs,
):
Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
@@ -78,6 +79,8 @@
]:
Field.append_arg(self, self.PHONETIC)
Field.append_arg(self, phonetic_matcher)
+ if withsuffixtrie:
+ Field.append_arg(self, "WITHSUFFIXTRIE")
class NumericField(Field):
@@ -108,11 +111,18 @@
CASESENSITIVE = "CASESENSITIVE"
def __init__(
- self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ withsuffixtrie: bool = False,
+ **kwargs,
):
args = [Field.TAG, self.SEPARATOR, separator]
if case_sensitive:
args.append(self.CASESENSITIVE)
+ if withsuffixtrie:
+ args.append("WITHSUFFIXTRIE")
Field.__init__(self, name, args=args, **kwargs)
| {"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -64,6 +64,7 @@\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n+ withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n@@ -78,6 +79,8 @@\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n+ if withsuffixtrie:\n+ Field.append_arg(self, \"WITHSUFFIXTRIE\")\n \n \n class NumericField(Field):\n@@ -108,11 +111,18 @@\n CASESENSITIVE = \"CASESENSITIVE\"\n \n def __init__(\n- self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ withsuffixtrie: bool = False,\n+ **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n+ if withsuffixtrie:\n+ args.append(\"WITHSUFFIXTRIE\")\n \n Field.__init__(self, name, args=args, **kwargs)\n", "issue": "Add support for WITHSUFFIXTRIE to FT.CREATE \nRediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with 
simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}], "after_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n if withsuffixtrie:\n Field.append_arg(self, \"WITHSUFFIXTRIE\")\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n 
\"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self,\n name: str,\n separator: str = \",\",\n case_sensitive: bool = False,\n withsuffixtrie: bool = False,\n **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n if withsuffixtrie:\n args.append(\"WITHSUFFIXTRIE\")\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}]} | 1,765 | 353 |
gh_patches_debug_37669 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add JP prices
See https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/JP.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3 import logging
4 # The arrow library is used to handle datetimes
5 import arrow
6 import pandas as pd
7 from . import occtonet
8
9 # Abbreviations
10 # JP-HKD : Hokkaido
11 # JP-TH : Tohoku
12 # JP-TK : Tokyo area
13 # JP-CB : Chubu
14 # JP-HR : Hokuriku
15 # JP-KN : Kansai
16 # JP-SK : Shikoku
17 # JP-KY : Kyushu
18 # JP-ON : Okinawa
19
20 def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
21 logger=logging.getLogger(__name__)):
22 """
23 Calculates production from consumption and imports for a given area
24 All production is mapped to unknown
25 """
26 if target_datetime:
27 raise NotImplementedError(
28 'This parser is not yet able to parse past dates')
29 exch_map = {
30 'JP-HKD':['JP-TH'],
31 'JP-TH':['JP-TK'],
32 'JP-TK':['JP-TH', 'JP-CB'],
33 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],
34 'JP-HR':['JP-CB', 'JP-KN'],
35 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],
36 'JP-SK':['JP-KN', 'JP-CG'],
37 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']
38 }
39 df = fetch_consumption_df(zone_key, target_datetime)
40 df['imports'] = 0
41 for zone in exch_map[zone_key]:
42 df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)
43 df2 = pd.DataFrame(df2)
44 exchname = df2.loc[0, 'sortedZoneKeys']
45 df2 = df2[['datetime', 'netFlow']]
46 df2.columns = ['datetime', exchname]
47 df = pd.merge(df, df2, how='inner', on='datetime')
48 if exchname.split('->')[-1] == zone_key:
49 df['imports'] = df['imports']+df[exchname]
50 else:
51 df['imports'] = df['imports']-df[exchname]
52 df['prod'] = df['cons']-df['imports']
53 df = df[['datetime', 'prod']]
54 # add a row to production for each entry in the dictionary:
55 sources = {
56 'JP-HKD':'denkiyoho.hepco.co.jp',
57 'JP-TH':'setsuden.tohoku-epco.co.jp',
58 'JP-TK':'www.tepco.co.jp',
59 'JP-CB':'denki-yoho.chuden.jp',
60 'JP-HR':'www.rikuden.co.jp/denki-yoho',
61 'JP-KN':'www.kepco.co.jp',
62 'JP-SK':'www.energia.co.jp',
63 'JP-CG':'www.yonden.co.jp'
64 }
65 datalist = []
66 for i in range(df.shape[0]):
67 data = {
68 'zoneKey': zone_key,
69 'datetime': df.loc[i, 'datetime'].to_pydatetime(),
70 'production': {
71 'biomass': None,
72 'coal': None,
73 'gas': None,
74 'hydro': None,
75 'nuclear': None,
76 'oil': None,
77 'solar': None,
78 'wind': None,
79 'geothermal': None,
80 'unknown': df.loc[i, 'prod']
81 },
82 'storage': {},
83 'source': ['occtonet.or.jp', sources[zone_key]]
84 }
85 datalist.append(data)
86 return datalist
87
88
89 def fetch_consumption_df(zone_key='JP-TK', target_datetime=None,
90 logger=logging.getLogger(__name__)):
91 """
92 Returns the consumption for an area as a pandas DataFrame
93 """
94 datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')
95 consumption_url = {
96 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),
97 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),
98 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',
99 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),
100 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',
101 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',
102 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),
103 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'
104 }
105 # First roughly 40 rows of the consumption files have hourly data,
106 # the parser skips to the rows with 5-min actual values
107 if zone_key == 'JP-KN':
108 startrow = 44
109 else:
110 startrow = 42
111 df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),
112 encoding='shift-jis')
113 df.columns = ['Date', 'Time', 'cons']
114 # Convert 万kW to MW
115 df['cons'] = 10*df['cons']
116 df = df.dropna()
117 df['datetime'] = df.apply(parse_dt, axis=1)
118 df = df[['datetime', 'cons']]
119 return df
120
121 def parse_dt(row):
122 """
123 Parses timestamps from date and time
124 """
125 return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
126 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
127
128 if __name__ == '__main__':
129 """Main method, never used by the Electricity Map backend, but handy for testing."""
130
131 print('fetch_production() ->')
132 print(fetch_production())
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/JP.py b/parsers/JP.py
--- a/parsers/JP.py
+++ b/parsers/JP.py
@@ -3,6 +3,7 @@
import logging
# The arrow library is used to handle datetimes
import arrow
+import datetime as dt
import pandas as pd
from . import occtonet
@@ -16,6 +17,8 @@
# JP-SK : Shikoku
# JP-KY : Kyushu
# JP-ON : Okinawa
+# JP-CG : Chūgoku
+
def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
@@ -118,6 +121,47 @@
df = df[['datetime', 'cons']]
return df
+
+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,
+ logger=logging.getLogger(__name__)):
+ if target_datetime is None:
+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)
+
+ # price files contain data for fiscal year and not calendar year.
+ if target_datetime.month <= 3:
+ fiscal_year = target_datetime.year - 1
+ else:
+ fiscal_year = target_datetime.year
+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)
+ df = pd.read_csv(url)
+
+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]
+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',
+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']
+
+ if zone_key not in df.columns[2:]:
+ return []
+
+ start = target_datetime - dt.timedelta(days=1)
+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))
+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]
+
+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(
+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)
+
+ data = list()
+ for row in df.iterrows():
+ data.append({
+ 'zoneKey': zone_key,
+ 'currency': 'JPY',
+ 'datetime': row[1]['datetime'].datetime,
+ 'price': row[1][zone_key],
+ 'source': 'jepx.org'
+ })
+
+ return data
+
+
def parse_dt(row):
"""
Parses timestamps from date and time
@@ -125,8 +169,11 @@
return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
+
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
+ print('fetch_price() ->')
+ print(fetch_price())
| {"golden_diff": "diff --git a/parsers/JP.py b/parsers/JP.py\n--- a/parsers/JP.py\n+++ b/parsers/JP.py\n@@ -3,6 +3,7 @@\n import logging\n # The arrow library is used to handle datetimes\n import arrow\n+import datetime as dt\n import pandas as pd\n from . import occtonet\n \n@@ -16,6 +17,8 @@\n # JP-SK : Shikoku\n # JP-KY : Kyushu\n # JP-ON : Okinawa\n+# JP-CG : Ch\u016bgoku\n+\n \n def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n@@ -118,6 +121,47 @@\n df = df[['datetime', 'cons']]\n return df\n \n+\n+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,\n+ logger=logging.getLogger(__name__)):\n+ if target_datetime is None:\n+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)\n+\n+ # price files contain data for fiscal year and not calendar year.\n+ if target_datetime.month <= 3:\n+ fiscal_year = target_datetime.year - 1\n+ else:\n+ fiscal_year = target_datetime.year\n+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)\n+ df = pd.read_csv(url)\n+\n+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',\n+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']\n+\n+ if zone_key not in df.columns[2:]:\n+ return []\n+\n+ start = target_datetime - dt.timedelta(days=1)\n+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))\n+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]\n+\n+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(\n+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)\n+\n+ data = list()\n+ for row in df.iterrows():\n+ data.append({\n+ 'zoneKey': zone_key,\n+ 'currency': 'JPY',\n+ 'datetime': row[1]['datetime'].datetime,\n+ 'price': row[1][zone_key],\n+ 'source': 'jepx.org'\n+ })\n+\n+ return data\n+\n+\n def parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n@@ -125,8 +169,11 @@\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n \n+\n if __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n \n print('fetch_production() ->')\n print(fetch_production())\n+ print('fetch_price() ->')\n+ print(fetch_price())\n", "issue": "Add JP prices\nSee https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine \n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\n# The arrow library is used to handle datetimes\nimport arrow\nimport pandas as pd\nfrom . 
import occtonet\n\n# Abbreviations\n# JP-HKD : Hokkaido\n# JP-TH : Tohoku\n# JP-TK : Tokyo area\n# JP-CB : Chubu\n# JP-HR : Hokuriku\n# JP-KN : Kansai\n# JP-SK : Shikoku\n# JP-KY : Kyushu\n# JP-ON : Okinawa\n\ndef fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Calculates production from consumption and imports for a given area\n All production is mapped to unknown\n \"\"\"\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n exch_map = {\n 'JP-HKD':['JP-TH'],\n 'JP-TH':['JP-TK'],\n 'JP-TK':['JP-TH', 'JP-CB'],\n 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],\n 'JP-HR':['JP-CB', 'JP-KN'],\n 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],\n 'JP-SK':['JP-KN', 'JP-CG'],\n 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']\n }\n df = fetch_consumption_df(zone_key, target_datetime)\n df['imports'] = 0\n for zone in exch_map[zone_key]:\n df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)\n df2 = pd.DataFrame(df2)\n exchname = df2.loc[0, 'sortedZoneKeys']\n df2 = df2[['datetime', 'netFlow']]\n df2.columns = ['datetime', exchname]\n df = pd.merge(df, df2, how='inner', on='datetime')\n if exchname.split('->')[-1] == zone_key:\n df['imports'] = df['imports']+df[exchname]\n else:\n df['imports'] = df['imports']-df[exchname]\n df['prod'] = df['cons']-df['imports']\n df = df[['datetime', 'prod']]\n # add a row to production for each entry in the dictionary:\n sources = {\n 'JP-HKD':'denkiyoho.hepco.co.jp',\n 'JP-TH':'setsuden.tohoku-epco.co.jp',\n 'JP-TK':'www.tepco.co.jp',\n 'JP-CB':'denki-yoho.chuden.jp',\n 'JP-HR':'www.rikuden.co.jp/denki-yoho',\n 'JP-KN':'www.kepco.co.jp',\n 'JP-SK':'www.energia.co.jp',\n 'JP-CG':'www.yonden.co.jp'\n }\n datalist = []\n for i in range(df.shape[0]):\n data = {\n 'zoneKey': zone_key,\n 'datetime': df.loc[i, 'datetime'].to_pydatetime(),\n 'production': {\n 'biomass': None,\n 'coal': None,\n 'gas': None,\n 'hydro': None,\n 'nuclear': None,\n 'oil': None,\n 'solar': None,\n 'wind': None,\n 'geothermal': None,\n 'unknown': df.loc[i, 'prod']\n },\n 'storage': {},\n 'source': ['occtonet.or.jp', sources[zone_key]]\n }\n datalist.append(data)\n return datalist\n\n\ndef fetch_consumption_df(zone_key='JP-TK', target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Returns the consumption for an area as a pandas DataFrame\n \"\"\"\n datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')\n consumption_url = {\n 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),\n 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),\n 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',\n 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),\n 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',\n 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',\n 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),\n 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'\n }\n # First roughly 40 rows of the consumption files have hourly data,\n # the parser skips to the rows with 5-min actual values \n if zone_key == 'JP-KN':\n startrow = 44\n else:\n startrow = 42\n df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),\n encoding='shift-jis')\n df.columns = ['Date', 'Time', 'cons']\n # Convert \u4e07kW to MW\n df['cons'] = 10*df['cons']\n df = 
df.dropna()\n df['datetime'] = df.apply(parse_dt, axis=1)\n df = df[['datetime', 'cons']]\n return df\n\ndef parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n \"\"\"\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/JP.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\n# The arrow library is used to handle datetimes\nimport arrow\nimport datetime as dt\nimport pandas as pd\nfrom . import occtonet\n\n# Abbreviations\n# JP-HKD : Hokkaido\n# JP-TH : Tohoku\n# JP-TK : Tokyo area\n# JP-CB : Chubu\n# JP-HR : Hokuriku\n# JP-KN : Kansai\n# JP-SK : Shikoku\n# JP-KY : Kyushu\n# JP-ON : Okinawa\n# JP-CG : Ch\u016bgoku\n\n\ndef fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Calculates production from consumption and imports for a given area\n All production is mapped to unknown\n \"\"\"\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n exch_map = {\n 'JP-HKD':['JP-TH'],\n 'JP-TH':['JP-TK'],\n 'JP-TK':['JP-TH', 'JP-CB'],\n 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],\n 'JP-HR':['JP-CB', 'JP-KN'],\n 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],\n 'JP-SK':['JP-KN', 'JP-CG'],\n 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']\n }\n df = fetch_consumption_df(zone_key, target_datetime)\n df['imports'] = 0\n for zone in exch_map[zone_key]:\n df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)\n df2 = pd.DataFrame(df2)\n exchname = df2.loc[0, 'sortedZoneKeys']\n df2 = df2[['datetime', 'netFlow']]\n df2.columns = ['datetime', exchname]\n df = pd.merge(df, df2, how='inner', on='datetime')\n if exchname.split('->')[-1] == zone_key:\n df['imports'] = df['imports']+df[exchname]\n else:\n df['imports'] = df['imports']-df[exchname]\n df['prod'] = df['cons']-df['imports']\n df = df[['datetime', 'prod']]\n # add a row to production for each entry in the dictionary:\n sources = {\n 'JP-HKD':'denkiyoho.hepco.co.jp',\n 'JP-TH':'setsuden.tohoku-epco.co.jp',\n 'JP-TK':'www.tepco.co.jp',\n 'JP-CB':'denki-yoho.chuden.jp',\n 'JP-HR':'www.rikuden.co.jp/denki-yoho',\n 'JP-KN':'www.kepco.co.jp',\n 'JP-SK':'www.energia.co.jp',\n 'JP-CG':'www.yonden.co.jp'\n }\n datalist = []\n for i in range(df.shape[0]):\n data = {\n 'zoneKey': zone_key,\n 'datetime': df.loc[i, 'datetime'].to_pydatetime(),\n 'production': {\n 'biomass': None,\n 'coal': None,\n 'gas': None,\n 'hydro': None,\n 'nuclear': None,\n 'oil': None,\n 'solar': None,\n 'wind': None,\n 'geothermal': None,\n 'unknown': df.loc[i, 'prod']\n },\n 'storage': {},\n 'source': ['occtonet.or.jp', sources[zone_key]]\n }\n datalist.append(data)\n return datalist\n\n\ndef fetch_consumption_df(zone_key='JP-TK', target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Returns the consumption for an area as a pandas DataFrame\n \"\"\"\n datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')\n consumption_url = {\n 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),\n 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),\n 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',\n 'JP-HR': 
'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),\n 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',\n 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',\n 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),\n 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'\n }\n # First roughly 40 rows of the consumption files have hourly data,\n # the parser skips to the rows with 5-min actual values \n if zone_key == 'JP-KN':\n startrow = 44\n else:\n startrow = 42\n df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),\n encoding='shift-jis')\n df.columns = ['Date', 'Time', 'cons']\n # Convert \u4e07kW to MW\n df['cons'] = 10*df['cons']\n df = df.dropna()\n df['datetime'] = df.apply(parse_dt, axis=1)\n df = df[['datetime', 'cons']]\n return df\n\n\ndef fetch_price(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime is None:\n target_datetime = dt.datetime.now() + dt.timedelta(days=1)\n\n # price files contain data for fiscal year and not calendar year.\n if target_datetime.month <= 3:\n fiscal_year = target_datetime.year - 1\n else:\n fiscal_year = target_datetime.year\n url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)\n df = pd.read_csv(url)\n\n df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',\n 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']\n\n if zone_key not in df.columns[2:]:\n return []\n\n start = target_datetime - dt.timedelta(days=1)\n df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))\n df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]\n\n df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(\n minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)\n\n data = list()\n for row in df.iterrows():\n data.append({\n 'zoneKey': zone_key,\n 'currency': 'JPY',\n 'datetime': row[1]['datetime'].datetime,\n 'price': row[1][zone_key],\n 'source': 'jepx.org'\n })\n\n return data\n\n\ndef parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n \"\"\"\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n", "path": "parsers/JP.py"}]} | 2,003 | 777 |
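The new `fetch_price()` in the record above turns each JEPX spot-market row (a date plus a half-hourly period number from 1 to 48) into a Japan-time datetime. Below is a tiny standalone sketch of that arithmetic, with made-up sample values; it mirrors the shift-by-30-minutes logic in the patch rather than reproducing the parser itself.

```python
# Standalone sketch of the JEPX period-to-datetime conversion used by the new
# fetch_price() above. Sample date and periods are arbitrary.
import arrow


def jepx_slot_to_datetime(date_str: str, period: int):
    """Map a JEPX (date, period) pair to an Asia/Tokyo datetime.

    Period 1 covers 00:00-00:30, period 48 covers 23:30-24:00.
    """
    return (
        arrow.get(date_str, "YYYY/M/D")
        .shift(minutes=30 * (period - 1))
        .replace(tzinfo="Asia/Tokyo")
    )


print(jepx_slot_to_datetime("2018/8/8", 1))   # 2018-08-08T00:00:00+09:00
print(jepx_slot_to_datetime("2018/8/8", 48))  # 2018-08-08T23:30:00+09:00
```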
gh_patches_debug_22042 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2706 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Idea: Enhance Amazon Polly support
Amazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.
This [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes into a little more detail on it.
Thanks!
--- END ISSUE ---
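As a purely illustrative aside on what the requested features look like against the raw Polly API (this is not the patch, and none of it appears in the files below): a neural-engine voice with the conversational speaking style is selected roughly as follows. AWS credentials and a region are assumed to be configured in the environment, and `Matthew` is one of the voices that supports both options.

```python
# Illustrative sketch only: requesting a neural-engine voice plus the
# conversational SSML domain directly through boto3. Credentials/region are
# assumed to come from the environment; the output path is arbitrary.
import boto3

polly = boto3.Session(region_name="us-east-1").client("polly")

ssml = (
    '<speak><amazon:domain name="conversational">'
    "Hello, this sentence uses the conversational speaking style."
    "</amazon:domain></speak>"
)

response = polly.synthesize_speech(
    Engine="neural",      # "standard" or "neural"
    OutputFormat="mp3",
    TextType="ssml",
    Text=ssml,
    VoiceId="Matthew",
)

with open("neural_demo.mp3", "wb") as f:
    f.write(response["AudioStream"].read())
```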
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/tts/polly_tts.py`
Content:
```
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from mycroft.tts.tts import TTS, TTSValidator
16 from mycroft.configuration import Configuration
17
18
19 class PollyTTS(TTS):
20 def __init__(self, lang="en-us", config=None):
21 import boto3
22 config = config or Configuration.get().get("tts", {}).get("polly", {})
23 super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),
24 audio_ext="mp3",
25 ssml_tags=["speak", "say-as", "voice",
26 "prosody", "break",
27 "emphasis", "sub", "lang",
28 "phoneme", "w", "whisper",
29 "amazon:auto-breaths",
30 "p", "s", "amazon:effect",
31 "mark"])
32
33 self.voice = self.config.get("voice", "Matthew")
34 self.key_id = self.config.get("access_key_id", '')
35 self.key = self.config.get("secret_access_key", '')
36 self.region = self.config.get("region", 'us-east-1')
37 self.polly = boto3.Session(aws_access_key_id=self.key_id,
38 aws_secret_access_key=self.key,
39 region_name=self.region).client('polly')
40
41 def get_tts(self, sentence, wav_file):
42 text_type = "text"
43 if self.remove_ssml(sentence) != sentence:
44 text_type = "ssml"
45 sentence = sentence \
46 .replace("\\whispered", "/amazon:effect") \
47 .replace("whispered", "amazon:effect name=\"whispered\"")
48 response = self.polly.synthesize_speech(
49 OutputFormat=self.audio_ext,
50 Text=sentence,
51 TextType=text_type,
52 VoiceId=self.voice)
53
54 with open(wav_file, 'wb') as f:
55 f.write(response['AudioStream'].read())
56 return (wav_file, None) # No phonemes
57
58 def describe_voices(self, language_code="en-US"):
59 if language_code.islower():
60 a, b = language_code.split("-")
61 b = b.upper()
62 language_code = "-".join([a, b])
63 # example 'it-IT' useful to retrieve voices
64 voices = self.polly.describe_voices(LanguageCode=language_code)
65
66 return voices
67
68
69 class PollyTTSValidator(TTSValidator):
70 def __init__(self, tts):
71 super(PollyTTSValidator, self).__init__(tts)
72
73 def validate_lang(self):
74 # TODO
75 pass
76
77 def validate_dependencies(self):
78 try:
79 from boto3 import Session
80 except ImportError:
81 raise Exception(
82 'PollyTTS dependencies not installed, please run pip install '
83 'boto3 ')
84
85 def validate_connection(self):
86 try:
87 if not self.tts.voice:
88 raise Exception("Polly TTS Voice not configured")
89 output = self.tts.describe_voices()
90 except TypeError:
91 raise Exception(
92 'PollyTTS server could not be verified. Please check your '
93 'internet connection and credentials.')
94
95 def get_tts_class(self):
96 return PollyTTS
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py
--- a/mycroft/tts/polly_tts.py
+++ b/mycroft/tts/polly_tts.py
@@ -34,6 +34,7 @@
self.key_id = self.config.get("access_key_id", '')
self.key = self.config.get("secret_access_key", '')
self.region = self.config.get("region", 'us-east-1')
+ self.engine = self.config.get("engine", "standard")
self.polly = boto3.Session(aws_access_key_id=self.key_id,
aws_secret_access_key=self.key,
region_name=self.region).client('polly')
@@ -49,7 +50,8 @@
OutputFormat=self.audio_ext,
Text=sentence,
TextType=text_type,
- VoiceId=self.voice)
+ VoiceId=self.voice,
+ Engine=self.engine)
with open(wav_file, 'wb') as f:
f.write(response['AudioStream'].read())
| {"golden_diff": "diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py\n--- a/mycroft/tts/polly_tts.py\n+++ b/mycroft/tts/polly_tts.py\n@@ -34,6 +34,7 @@\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n+ self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n@@ -49,7 +50,8 @@\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n- VoiceId=self.voice)\n+ VoiceId=self.voice,\n+ Engine=self.engine)\n \n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n", "issue": "Idea: Enhance Amazon Polly support\nAmazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.\r\n\r\nThis [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes in to a little more detail on it.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = 
b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice,\n Engine=self.engine)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # 
TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}]} | 1,386 | 227 |
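For readers skimming this record, the patch above boils down to reading an `engine` value from the TTS config and forwarding it to Polly's `synthesize_speech` call. Below is a minimal standalone sketch of that call; the function name, the hard-coded region and voice, and the reliance on the default AWS credential chain are assumptions for illustration and are not Mycroft code.

```python
import boto3


def synthesize(sentence, out_path, engine="neural"):
    # Credentials and region come from the standard AWS configuration chain here.
    polly = boto3.Session(region_name="us-east-1").client("polly")
    response = polly.synthesize_speech(
        OutputFormat="mp3",
        Text=sentence,
        TextType="text",
        VoiceId="Matthew",   # neural engine support varies by voice and region
        Engine=engine,       # "standard" or "neural"
    )
    with open(out_path, "wb") as f:
        f.write(response["AudioStream"].read())
    return out_path
```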
gh_patches_debug_22555 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-967 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gh: prefix doesn't work anymore
* Cookiecutter version: 1.5.1
* Template project url: `gh:*`
* Python version: 2.7.13
* Operating System: Linux
### Description:
cookiecutter does not honor prefixes anymore.
### What I've run:
Simply testing the example from the README doesn't work as expected:
``` bash
$ cookiecutter gh:audreyr/cookiecutter-pypackage
A valid repository for "gh:audreyr/cookiecutter-pypackage" could not be found in the following locations:
gh:audreyr/cookiecutter-pypackage
/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage
```
The same commands using the full repository path works as expected:
```bash
$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/config.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Global configuration handling."""
4
5 from __future__ import unicode_literals
6 import copy
7 import logging
8 import os
9 import io
10
11 import poyo
12
13 from .exceptions import ConfigDoesNotExistException
14 from .exceptions import InvalidConfiguration
15
16
17 logger = logging.getLogger(__name__)
18
19 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
20
21 BUILTIN_ABBREVIATIONS = {
22 'gh': 'https://github.com/{0}.git',
23 'gl': 'https://gitlab.com/{0}.git',
24 'bb': 'https://bitbucket.org/{0}',
25 }
26
27 DEFAULT_CONFIG = {
28 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
29 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
30 'default_context': {},
31 'abbreviations': BUILTIN_ABBREVIATIONS,
32 }
33
34
35 def _expand_path(path):
36 """Expand both environment variables and user home in the given path."""
37 path = os.path.expandvars(path)
38 path = os.path.expanduser(path)
39 return path
40
41
42 def get_config(config_path):
43 """Retrieve the config from the specified path, returning a config dict."""
44 if not os.path.exists(config_path):
45 raise ConfigDoesNotExistException
46
47 logger.debug('config_path is {0}'.format(config_path))
48 with io.open(config_path, encoding='utf-8') as file_handle:
49 try:
50 yaml_dict = poyo.parse_string(file_handle.read())
51 except poyo.exceptions.PoyoException as e:
52 raise InvalidConfiguration(
53 'Unable to parse YAML file {}. Error: {}'
54 ''.format(config_path, e)
55 )
56
57 config_dict = copy.copy(DEFAULT_CONFIG)
58 config_dict.update(yaml_dict)
59
60 raw_replay_dir = config_dict['replay_dir']
61 config_dict['replay_dir'] = _expand_path(raw_replay_dir)
62
63 raw_cookies_dir = config_dict['cookiecutters_dir']
64 config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)
65
66 return config_dict
67
68
69 def get_user_config(config_file=None, default_config=False):
70 """Return the user config as a dict.
71
72 If ``default_config`` is True, ignore ``config_file`` and return default
73 values for the config parameters.
74
75 If a path to a ``config_file`` is given, that is different from the default
76 location, load the user config from that.
77
78 Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
79 environment variable. If set, load the config from this path. This will
80 raise an error if the specified path is not valid.
81
82 If the environment variable is not set, try the default config file path
83 before falling back to the default config values.
84 """
85 # Do NOT load a config. Return defaults instead.
86 if default_config:
87 return copy.copy(DEFAULT_CONFIG)
88
89 # Load the given config file
90 if config_file and config_file is not USER_CONFIG_PATH:
91 return get_config(config_file)
92
93 try:
94 # Does the user set up a config environment variable?
95 env_config_file = os.environ['COOKIECUTTER_CONFIG']
96 except KeyError:
97 # Load an optional user config if it exists
98 # otherwise return the defaults
99 if os.path.exists(USER_CONFIG_PATH):
100 return get_config(USER_CONFIG_PATH)
101 else:
102 return copy.copy(DEFAULT_CONFIG)
103 else:
104 # There is a config environment variable. Try to load it.
105 # Do not check for existence, so invalid file paths raise an error.
106 return get_config(env_config_file)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/config.py b/cookiecutter/config.py
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -39,6 +39,25 @@
return path
+def merge_configs(default, overwrite):
+ """Recursively update a dict with the key/value pair of another.
+
+ Dict values that are dictionaries themselves will be updated, whilst
+ preserving existing keys.
+ """
+ new_config = copy.deepcopy(default)
+
+ for k, v in overwrite.items():
+ # Make sure to preserve existing items in
+ # nested dicts, for example `abbreviations`
+ if isinstance(v, dict):
+ new_config[k] = merge_configs(default[k], v)
+ else:
+ new_config[k] = v
+
+ return new_config
+
+
def get_config(config_path):
"""Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
@@ -54,8 +73,7 @@
''.format(config_path, e)
)
- config_dict = copy.copy(DEFAULT_CONFIG)
- config_dict.update(yaml_dict)
+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)
raw_replay_dir = config_dict['replay_dir']
config_dict['replay_dir'] = _expand_path(raw_replay_dir)
| {"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -39,6 +39,25 @@\n return path\n \n \n+def merge_configs(default, overwrite):\n+ \"\"\"Recursively update a dict with the key/value pair of another.\n+\n+ Dict values that are dictionaries themselves will be updated, whilst\n+ preserving existing keys.\n+ \"\"\"\n+ new_config = copy.deepcopy(default)\n+\n+ for k, v in overwrite.items():\n+ # Make sure to preserve existing items in\n+ # nested dicts, for example `abbreviations`\n+ if isinstance(v, dict):\n+ new_config[k] = merge_configs(default[k], v)\n+ else:\n+ new_config[k] = v\n+\n+ return new_config\n+\n+\n def get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n@@ -54,8 +73,7 @@\n ''.format(config_path, e)\n )\n \n- config_dict = copy.copy(DEFAULT_CONFIG)\n- config_dict.update(yaml_dict)\n+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n \n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n", "issue": "gh: prefix doesn't work anymore\n* Cookiecutter version: 1.5.1\r\n* Template project url: `gh:*`\r\n* Python version: 2.7.13\r\n* Operating System: Linux\r\n\r\n### Description:\r\n\r\ncookiecutter does not honor prefixes anymore.\r\n\r\n### What I've run:\r\n\r\nSimply testing the example from the README doesn't work as expected:\r\n\r\n``` bash\r\n$ cookiecutter gh:audreyr/cookiecutter-pypackage\r\nA valid repository for \"gh:audreyr/cookiecutter-pypackage\" could not be found in the following locations:\r\ngh:audreyr/cookiecutter-pypackage\r\n/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage\r\n```\r\nThe same commands using the full repository path works as expected:\r\n\r\n```bash\r\n$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. 
Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef merge_configs(default, overwrite):\n \"\"\"Recursively update a dict with the key/value pair of another.\n\n Dict values that are dictionaries themselves will be updated, whilst\n preserving existing keys.\n \"\"\"\n new_config = copy.deepcopy(default)\n\n for k, v in overwrite.items():\n # Make sure to preserve existing items in\n # nested dicts, for example `abbreviations`\n if isinstance(v, dict):\n new_config[k] = merge_configs(default[k], v)\n else:\n new_config[k] = v\n\n return new_config\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is 
{0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}]} | 1,452 | 305 |
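The key idea in the cookiecutter patch above is that a partial user config must overlay the defaults recursively, so nested defaults such as the built-in `gh`/`gl`/`bb` abbreviations survive a user-supplied `abbreviations` section. Here is a self-contained sketch of that merge behaviour; it uses `dict.get` instead of direct indexing, a small deviation from the patch, and the example config values are invented for illustration.

```python
import copy


def merge_configs(default, overwrite):
    """Recursively overlay `overwrite` onto `default`, keeping untouched nested keys."""
    merged = copy.deepcopy(default)
    for key, value in overwrite.items():
        if isinstance(value, dict):
            merged[key] = merge_configs(default.get(key, {}), value)
        else:
            merged[key] = value
    return merged


defaults = {"abbreviations": {"gh": "https://github.com/{0}.git"}}
user_rc = {"abbreviations": {"my": "https://example.org/{0}.git"}}
merged = merge_configs(defaults, user_rc)
assert merged["abbreviations"]["gh"] == "https://github.com/{0}.git"  # built-in prefix kept
assert merged["abbreviations"]["my"] == "https://example.org/{0}.git"  # user prefix added
```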
gh_patches_debug_4279 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3345 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reddit Movie Dialog no longer exists
**Bug description**
[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.
**Reproduction steps**
```
TrainModel.main(
# similar to before
task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat',
model='transformer/generator',
model_file='from_pretrained/model',
# initialize with a pretrained model
init_model='zoo:tutorial_transformer_generator/model',
# arguments we get from the pretrained model.
# Unfortunately, these must be looked up separately for each model.
n_heads=16, n_layers=8, n_positions=512, text_truncate=512,
label_truncate=128, ffn_size=2048, embedding_size=512,
activation='gelu', variant='xlm',
dict_lower=True, dict_tokenizer='bpe',
dict_file='zoo:tutorial_transformer_generator/model.dict',
learn_positional_embeddings=True,
# some training arguments, specific to this fine-tuning
# use a small learning rate with ADAM optimizer
lr=1e-5, optimizer='adam',
warmup_updates=100,
# early stopping on perplexity
validation_metric='ppl',
# train at most 10 minutes, and validate every 0.25 epochs
max_train_time=600, validation_every_n_epochs=0.25,
# depend on your gpu. If you have a V100, this is good
batchsize=12, fp16=True, fp16_impl='mem_efficient',
# speeds up validation
skip_generation=True,
# helps us cram more examples into our gpu at a time
dynamic_batching='full',
)
```
**Logs**
Please paste the command line output:
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-39-ff3044de39fe> in <module>()
36
37 # helps us cram more examples into our gpu at a time
---> 38 dynamic_batching='full',
39 )
15 frames
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
ModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/generate_task_list.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright (c) Facebook, Inc. and its affiliates.
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from parlai.tasks.task_list import task_list
7
8 MASTER = "https://github.com/facebookresearch/ParlAI/tree/master"
9
10 category_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']
11 category_task_list = {x: [] for x in category_order}
12
13 fout = open('task_list.inc', 'w')
14
15 s = "They consist of: "
16 for t in category_order:
17 fout.write(f"1. {t} tasks\n")
18 fout.write("\n")
19
20 for task_dict in task_list:
21 tags = task_dict.get('tags', None)
22 for tag in tags:
23 if tag in category_task_list:
24 category_task_list[tag].append(task_dict)
25
26 for num_category, (category, tl) in enumerate(category_task_list.items()):
27 if num_category != 0:
28 fout.write("\n-----\n\n")
29
30 fout.write(f'## {category} Tasks\n')
31
32 for task_dict in tl:
33 id = task_dict.get('id', None)
34 display_name = task_dict.get('display_name', None)
35 task = task_dict.get('task', None)
36 tags = task_dict.get('tags', None)
37 description = task_dict.get('description', None)
38 notes = task_dict.get('notes', None)
39 code_urlend = task[: max(task.find(':'), len(task))]
40 code_url = f"{MASTER}/parlai/tasks/{code_urlend}"
41 links = task_dict.get("links", {})
42 assert isinstance(links, dict), f"task {id} is poorly formatted"
43 urls = [(k, v) for k, v in links.items()]
44 urls.append(("code", code_url))
45
46 urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
47 fout.write(f"### {display_name}\n")
48 fout.write(f"_Links_: {urls_md}\n\n")
49 if description:
50 fout.write(description + "\n")
51 if notes:
52 fout.write(":::{admonition,note} Notes\n")
53 fout.write(notes + "\n")
54 fout.write(":::\n")
55 fout.write("\n\n")
56
57 fout.close()
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py
--- a/docs/source/generate_task_list.py
+++ b/docs/source/generate_task_list.py
@@ -45,6 +45,7 @@
urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
fout.write(f"### {display_name}\n")
+ fout.write(f"_Usage_: `--task {task}`\n\n")
fout.write(f"_Links_: {urls_md}\n\n")
if description:
fout.write(description + "\n")
| {"golden_diff": "diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py\n--- a/docs/source/generate_task_list.py\n+++ b/docs/source/generate_task_list.py\n@@ -45,6 +45,7 @@\n \n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n+ fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n", "issue": "Reddit Movie Dialog no longer exists\n**Bug description**\r\n[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.\r\n\r\n**Reproduction steps**\r\n```\r\nTrainModel.main(\r\n # similar to before\r\n task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat', \r\n model='transformer/generator',\r\n model_file='from_pretrained/model',\r\n \r\n # initialize with a pretrained model\r\n init_model='zoo:tutorial_transformer_generator/model',\r\n \r\n # arguments we get from the pretrained model.\r\n # Unfortunately, these must be looked up separately for each model.\r\n n_heads=16, n_layers=8, n_positions=512, text_truncate=512,\r\n label_truncate=128, ffn_size=2048, embedding_size=512,\r\n activation='gelu', variant='xlm',\r\n dict_lower=True, dict_tokenizer='bpe',\r\n dict_file='zoo:tutorial_transformer_generator/model.dict',\r\n learn_positional_embeddings=True,\r\n \r\n # some training arguments, specific to this fine-tuning\r\n # use a small learning rate with ADAM optimizer\r\n lr=1e-5, optimizer='adam',\r\n warmup_updates=100,\r\n # early stopping on perplexity\r\n validation_metric='ppl',\r\n # train at most 10 minutes, and validate every 0.25 epochs\r\n max_train_time=600, validation_every_n_epochs=0.25,\r\n \r\n # depend on your gpu. If you have a V100, this is good\r\n batchsize=12, fp16=True, fp16_impl='mem_efficient',\r\n \r\n # speeds up validation\r\n skip_generation=True,\r\n \r\n # helps us cram more examples into our gpu at a time\r\n dynamic_batching='full',\r\n)\r\n```\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-39-ff3044de39fe> in <module>()\r\n 36 \r\n 37 # helps us cram more examples into our gpu at a time\r\n---> 38 dynamic_batching='full',\r\n 39 )\r\n\r\n15 frames\r\n/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)\r\n\r\nModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'\r\n\r\n---------------------------------------------------------------------------\r\nNOTE: If your import is failing due to a missing package, you can\r\nmanually install dependencies using either !pip or !apt.\r\n\r\nTo view examples of installing some common dependencies, click the\r\n\"Open Examples\" button below.\r\n---------------------------------------------------------------------------```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. {t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. 
{t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}]} | 1,501 | 134 |
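The traceback in this record comes from ParlAI resolving `--task movie_dialog_reddit` to a `parlai.tasks.movie_dialog_reddit` module that no longer exists. A hedged sketch of a pre-flight check one could run before launching training is shown below; the `parlai.tasks.<name>.agents` module-path convention is inferred from the error message, and the helper is illustrative, not a ParlAI API.

```python
import importlib


def task_exists(task):
    """Return True if `parlai.tasks.<task>.agents` can be imported."""
    module_name = "parlai.tasks.{}.agents".format(task.split(":")[0])
    try:
        importlib.import_module(module_name)
        return True
    except ModuleNotFoundError:
        return False


# Example: filter a comma-separated --task string down to importable tasks.
requested = "empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2"
available = [t for t in requested.split(",") if task_exists(t)]
```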
gh_patches_debug_4443 | rasdani/github-patches | git_diff | pytorch__text-145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'list' object has no attribute 'rstrip'
Hi all, previously torchtext worked for me when I was running Anaconda Python. However, now that I have uninstalled my Anaconda Python, it stops working.
It gives me the following error:
```
File "/Library/Python/2.7/site-packages/torchtext/data/example.py", line 59, in fromlist
setattr(ex, name, field.preprocess(val.rstrip('\n')))
AttributeError: 'list' object has no attribute 'rstrip'
```
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/example.py`
Content:
```
1 import csv
2 import json
3
4 import six
5
6
7 class Example(object):
8 """Defines a single training or test example.
9
10 Stores each column of the example as an attribute.
11 """
12
13 @classmethod
14 def fromJSON(cls, data, fields):
15 return cls.fromdict(json.loads(data), fields)
16
17 @classmethod
18 def fromdict(cls, data, fields):
19 ex = cls()
20 for key, vals in fields.items():
21 if key not in data:
22 raise ValueError("Specified key {} was not found in "
23 "the input data".format(key))
24 if vals is not None:
25 if not isinstance(vals, list):
26 vals = [vals]
27 for val in vals:
28 name, field = val
29 setattr(ex, name, field.preprocess(data[key]))
30 return ex
31
32 @classmethod
33 def fromTSV(cls, data, fields):
34 return cls.fromlist(data.split('\t'), fields)
35
36 @classmethod
37 def fromCSV(cls, data, fields):
38 data = data.rstrip("\n")
39 # If Python 2, encode to utf-8 since CSV doesn't take unicode input
40 if six.PY2:
41 data = data.encode('utf-8')
42 # Use Python CSV module to parse the CSV line
43 parsed_csv_lines = csv.reader([data])
44
45 # If Python 2, decode back to unicode (the original input format).
46 if six.PY2:
47 for line in parsed_csv_lines:
48 parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]
49 break
50 else:
51 parsed_csv_line = list(parsed_csv_lines)[0]
52 return cls.fromlist(parsed_csv_line, fields)
53
54 @classmethod
55 def fromlist(cls, data, fields):
56 ex = cls()
57 for (name, field), val in zip(fields, data):
58 if field is not None:
59 setattr(ex, name, field.preprocess(val.rstrip('\n')))
60 return ex
61
62 @classmethod
63 def fromtree(cls, data, fields, subtrees=False):
64 try:
65 from nltk.tree import Tree
66 except ImportError:
67 print("Please install NLTK. "
68 "See the docs at http://nltk.org for more information.")
69 raise
70 tree = Tree.fromstring(data)
71 if subtrees:
72 return [cls.fromlist(
73 [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
74 return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -56,7 +56,9 @@
ex = cls()
for (name, field), val in zip(fields, data):
if field is not None:
- setattr(ex, name, field.preprocess(val.rstrip('\n')))
+ if isinstance(val, six.string_types):
+ val = val.rstrip('\n')
+ setattr(ex, name, field.preprocess(val))
return ex
@classmethod
| {"golden_diff": "diff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -56,7 +56,9 @@\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n- setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n+ if isinstance(val, six.string_types):\n+ val = val.rstrip('\\n')\n+ setattr(ex, name, field.preprocess(val))\n return ex\n \n @classmethod\n", "issue": "AttributeError: 'list' object has no attribute 'rstrip'\nHi all, previously torchtext works for me when I'm running anaconda python. However, now, when i uninstalled my anaconda python. It stops working.\r\n\r\nIt gives me the following error: \r\n\r\n```\r\nFile \"/Library/Python/2.7/site-packages/torchtext/data/example.py\", line 59, in fromlist\r\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\r\nAttributeError: 'list' object has no attribute 'rstrip'\r\n\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}], "after_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n if isinstance(val, six.string_types):\n val = val.rstrip('\\n')\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}]} | 1,059 | 128 |
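The fix in this record guards `rstrip` behind a string-type check so that pre-tokenized (list-valued) columns pass through `Example.fromlist` untouched. A tiny sketch of that guard in isolation is below; the helper name is an assumption for illustration.

```python
import six


def strip_trailing_newline(value):
    # Only str/unicode values can carry a trailing newline; lists are returned as-is.
    if isinstance(value, six.string_types):
        return value.rstrip("\n")
    return value


assert strip_trailing_newline("hello\n") == "hello"
assert strip_trailing_newline(["already", "tokenized"]) == ["already", "tokenized"]
```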
gh_patches_debug_12513 | rasdani/github-patches | git_diff | pypa__pip-11417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Completion in ZSH doesn't understand that arguments follow certain options
* Pip version: 9.0.1
* Python version: 2.7.12
* Operating system: Ubuntu 16.04
### Description:
Completion in Zsh uses the older `compctl` builtin and returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand that as meaning that the option takes an argument and adds a space after the equals sign and also tries to complete the next argument if you remove the equals sign as yet another option.
No idea if this is fixable using the older compctl, might want to migrate to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.
https://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it, which means certain things like colored file names or `zstyle`-related settings for file name completion won't work. And it still won't fix the fact that Zsh will add a space after the equals sign...
### What I've run:
```sh
➜ pip install --requirem<tab>
➜ pip install --requirement= # With a space
➜ pip install --requirement=<tab> # Nothing
➜ pip install --requirement= <tab>
➜ pip install --requirement= -- # Sigh...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/completion.py`
Content:
```
1 import sys
2 import textwrap
3 from optparse import Values
4 from typing import List
5
6 from pip._internal.cli.base_command import Command
7 from pip._internal.cli.status_codes import SUCCESS
8 from pip._internal.utils.misc import get_prog
9
10 BASE_COMPLETION = """
11 # pip {shell} completion start{script}# pip {shell} completion end
12 """
13
14 COMPLETION_SCRIPTS = {
15 "bash": """
16 _pip_completion()
17 {{
18 COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
19 COMP_CWORD=$COMP_CWORD \\
20 PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
21 }}
22 complete -o default -F _pip_completion {prog}
23 """,
24 "zsh": """
25 function _pip_completion {{
26 local words cword
27 read -Ac words
28 read -cn cword
29 reply=( $( COMP_WORDS="$words[*]" \\
30 COMP_CWORD=$(( cword-1 )) \\
31 PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
32 }}
33 compctl -K _pip_completion {prog}
34 """,
35 "fish": """
36 function __fish_complete_pip
37 set -lx COMP_WORDS (commandline -o) ""
38 set -lx COMP_CWORD ( \\
39 math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
40 )
41 set -lx PIP_AUTO_COMPLETE 1
42 string split \\ -- (eval $COMP_WORDS[1])
43 end
44 complete -fa "(__fish_complete_pip)" -c {prog}
45 """,
46 "powershell": """
47 if ((Test-Path Function:\\TabExpansion) -and -not `
48 (Test-Path Function:\\_pip_completeBackup)) {{
49 Rename-Item Function:\\TabExpansion _pip_completeBackup
50 }}
51 function TabExpansion($line, $lastWord) {{
52 $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
53 if ($lastBlock.StartsWith("{prog} ")) {{
54 $Env:COMP_WORDS=$lastBlock
55 $Env:COMP_CWORD=$lastBlock.Split().Length - 1
56 $Env:PIP_AUTO_COMPLETE=1
57 (& {prog}).Split()
58 Remove-Item Env:COMP_WORDS
59 Remove-Item Env:COMP_CWORD
60 Remove-Item Env:PIP_AUTO_COMPLETE
61 }}
62 elseif (Test-Path Function:\\_pip_completeBackup) {{
63 # Fall back on existing tab expansion
64 _pip_completeBackup $line $lastWord
65 }}
66 }}
67 """,
68 }
69
70
71 class CompletionCommand(Command):
72 """A helper command to be used for command completion."""
73
74 ignore_require_venv = True
75
76 def add_options(self) -> None:
77 self.cmd_opts.add_option(
78 "--bash",
79 "-b",
80 action="store_const",
81 const="bash",
82 dest="shell",
83 help="Emit completion code for bash",
84 )
85 self.cmd_opts.add_option(
86 "--zsh",
87 "-z",
88 action="store_const",
89 const="zsh",
90 dest="shell",
91 help="Emit completion code for zsh",
92 )
93 self.cmd_opts.add_option(
94 "--fish",
95 "-f",
96 action="store_const",
97 const="fish",
98 dest="shell",
99 help="Emit completion code for fish",
100 )
101 self.cmd_opts.add_option(
102 "--powershell",
103 "-p",
104 action="store_const",
105 const="powershell",
106 dest="shell",
107 help="Emit completion code for powershell",
108 )
109
110 self.parser.insert_option_group(0, self.cmd_opts)
111
112 def run(self, options: Values, args: List[str]) -> int:
113 """Prints the completion code of the given shell"""
114 shells = COMPLETION_SCRIPTS.keys()
115 shell_options = ["--" + shell for shell in sorted(shells)]
116 if options.shell in shells:
117 script = textwrap.dedent(
118 COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
119 )
120 print(BASE_COMPLETION.format(script=script, shell=options.shell))
121 return SUCCESS
122 else:
123 sys.stderr.write(
124 "ERROR: You must pass {}\n".format(" or ".join(shell_options))
125 )
126 return SUCCESS
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py
--- a/src/pip/_internal/commands/completion.py
+++ b/src/pip/_internal/commands/completion.py
@@ -22,15 +22,10 @@
complete -o default -F _pip_completion {prog}
""",
"zsh": """
- function _pip_completion {{
- local words cword
- read -Ac words
- read -cn cword
- reply=( $( COMP_WORDS="$words[*]" \\
- COMP_CWORD=$(( cword-1 )) \\
- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
- }}
- compctl -K _pip_completion {prog}
+ #compdef -P pip[0-9.]#
+ compadd $( COMP_WORDS="$words[*]" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
""",
"fish": """
function __fish_complete_pip
| {"golden_diff": "diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py\n--- a/src/pip/_internal/commands/completion.py\n+++ b/src/pip/_internal/commands/completion.py\n@@ -22,15 +22,10 @@\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n- function _pip_completion {{\n- local words cword\n- read -Ac words\n- read -cn cword\n- reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n- COMP_CWORD=$(( cword-1 )) \\\\\n- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n- }}\n- compctl -K _pip_completion {prog}\n+ #compdef -P pip[0-9.]#\n+ compadd $( COMP_WORDS=\"$words[*]\" \\\\\n+ COMP_CWORD=$((CURRENT-1)) \\\\\n+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n", "issue": "Completion in ZSH doesn't understand that arguments follow certain options\n* Pip version: 9.0.1\r\n* Python version: 2.7.12\r\n* Operating system: Ubuntu 16.04\r\n\r\n### Description:\r\n\r\nCompletion in Zsh uses the older `compctl` builtin and returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand that as meaning that the option takes an argument and adds a space after the equals sign and also tries to complete the next argument if you remove the equals sign as yet another option.\r\n\r\nNo idea if this is fixable using the older compctl, might want to migrate to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.\r\n\r\nhttps://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it which means certain stuff like colored file names won't work or `zstyle` related settings for file name completion. 
And it still won't fix the fact that Zsh will add a space after the equals sign...\r\n\r\n### What I've run:\r\n\r\n```sh\r\n\u279c pip install --requirem<tab>\r\n\u279c pip install --requirement= # With a space\r\n\u279c pip install --requirement=<tab> # Nothing\r\n\u279c pip install --requirement= <tab>\r\n\u279c pip install --requirement= -- # Sigh...\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n function _pip_completion {{\n local words cword\n read -Ac words\n read -cn cword\n reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$(( cword-1 )) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n }}\n compctl -K _pip_completion {prog}\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, 
\"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or \".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}], "after_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n #compdef -P pip[0-9.]#\n compadd $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, \"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or 
\".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}]} | 1,793 | 247 |
gh_patches_debug_13914 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
store window: district tile announces more results than there are if I click on them
The store window tile shows ALL projects of the district; if I click it and get to the project overview, the default filter takes out all old projects and plans without participation (Beteiligung).
Can we only count running projects with participation?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/cms/models/storefronts.py`
Content:
```
1 import random
2
3 from django.db import models
4 from django.utils.functional import cached_property
5 from modelcluster.fields import ParentalKey
6 from modelcluster.models import ClusterableModel
7 from wagtail.admin import edit_handlers
8 from wagtail.admin.edit_handlers import FieldPanel
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from wagtail.snippets.models import register_snippet
11
12 from adhocracy4.comments.models import Comment
13 from adhocracy4.modules.models import Item
14 from adhocracy4.projects.models import Project
15 from meinberlin.apps.projects import get_project_type
16
17
18 class StorefrontItem(models.Model):
19 district = models.ForeignKey(
20 'a4administrative_districts.AdministrativeDistrict',
21 related_name='+',
22 null=True,
23 blank=True
24 )
25 project = models.ForeignKey(
26 'a4projects.Project',
27 related_name='+',
28 null=True,
29 blank=True
30 )
31 quote = models.TextField(
32 blank=True,
33 max_length=150
34 )
35
36 def __str__(self):
37 return str(self.pk)
38
39 @cached_property
40 def item_type(self):
41 if get_project_type(self.project) in ('external', 'bplan'):
42 return 'external'
43 return 'project'
44
45 @cached_property
46 def project_url(self):
47 if self.item_type == 'external':
48 return self.project.externalproject.url
49 return self.project.get_absolute_url()
50
51 @cached_property
52 def district_project_count(self):
53 return Project.objects\
54 .filter(administrative_district=self.district,
55 is_draft=False,
56 is_public=True,
57 is_archived=False
58 ).count()
59
60 panels = [
61 FieldPanel('district'),
62 FieldPanel('project'),
63 FieldPanel('quote'),
64 ]
65
66
67 @register_snippet
68 class Storefront(ClusterableModel):
69 title = models.CharField(max_length=255, null=False, blank=False)
70 image = models.ForeignKey(
71 'meinberlin_cms.CustomImage',
72 null=True,
73 blank=True,
74 on_delete=models.SET_NULL,
75 related_name='+'
76 )
77 teaser = models.CharField(max_length=100)
78
79 def __str__(self):
80 return self.title
81
82 @cached_property
83 def num_entries(self):
84 num_comments = Comment.objects.all().count()
85 num_items = Item.objects.all().count()
86 return num_comments + num_items
87
88 @cached_property
89 def num_projects(self):
90 projects = Project.objects.all()\
91 .filter(is_draft=False, is_archived=False, is_public=True)
92 active_project_count = 0
93 for project in projects:
94 if project.active_phase or project.future_phases:
95 active_project_count += 1
96 return active_project_count
97
98 @cached_property
99 def random_items(self):
100 items = self.items.all()
101 if items.count() > 3:
102 items_list = items.values_list('id', flat=True)
103 random_items = random.sample(list(items_list), 3)
104 return StorefrontItem.objects.filter(id__in=random_items)
105 else:
106 return items
107
108 title_panel = [
109 edit_handlers.FieldPanel('title')
110 ]
111
112 image_tile_panel = [
113 ImageChooserPanel('image'),
114 edit_handlers.FieldPanel('teaser')
115 ]
116
117 project_tiles_panel = [
118 edit_handlers.InlinePanel('items', min_num=3)
119 ]
120
121 edit_handler = edit_handlers.TabbedInterface([
122 edit_handlers.ObjectList(title_panel, heading='Title'),
123 edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),
124 edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')
125 ])
126
127
128 class StorefrontCollection(StorefrontItem):
129 parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py
--- a/meinberlin/apps/cms/models/storefronts.py
+++ b/meinberlin/apps/cms/models/storefronts.py
@@ -50,12 +50,17 @@
@cached_property
def district_project_count(self):
- return Project.objects\
+ projects = Project.objects\
.filter(administrative_district=self.district,
is_draft=False,
is_public=True,
is_archived=False
- ).count()
+ )
+ active_project_count = 0
+ for project in projects:
+ if project.active_phase or project.future_phases:
+ active_project_count += 1
+ return active_project_count
panels = [
FieldPanel('district'),
| {"golden_diff": "diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py\n--- a/meinberlin/apps/cms/models/storefronts.py\n+++ b/meinberlin/apps/cms/models/storefronts.py\n@@ -50,12 +50,17 @@\n \n @cached_property\n def district_project_count(self):\n- return Project.objects\\\n+ projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n- ).count()\n+ )\n+ active_project_count = 0\n+ for project in projects:\n+ if project.active_phase or project.future_phases:\n+ active_project_count += 1\n+ return active_project_count\n \n panels = [\n FieldPanel('district'),\n", "issue": "store window: district tile announces more results than there are if I click on them\nstore window tile shows ALL project of district, if I click and get to project overview, the default filter takes out all old projects and plans without beteiligung.\r\n\r\ncan we only count running projects with participation?\n", "before_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n return Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n ).count()\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = self.items.all()\n if 
items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}], "after_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n )\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = 
self.items.all()\n if items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}]} | 1,413 | 185 |
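The fix above reuses the pattern already present in `Storefront.num_projects`: a project only counts when it has an active phase or at least one future phase. That check is small enough to express as a standalone helper (the function name below is an assumption, not something in the meinberlin codebase):

```python
def count_projects_open_for_participation(projects):
    """Count projects that are running now or will open for participation later."""
    return sum(
        1
        for project in projects
        if project.active_phase or project.future_phases
    )
```

Extracting a helper like this would let `num_projects` and `district_project_count` share one definition instead of duplicating the loop.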
gh_patches_debug_15530 | rasdani/github-patches | git_diff | ibis-project__ibis-6950 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug(bigquery): memtable and string literals not escaping `\n` or `\` which results in invalid syntax
### What happened?
Code:
```python
import ibis
ibis_client = ibis.bigquery.connect()
table = ibis.memtable(
{
"col1": ["a\tb\nc", "d e f", "g'e\"h"],
}
)
print(ibis_client.compile(table))
```
Output:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b
c' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Note, the following SQL works as expected:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Therefore, we should really be escaping `\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals
Alternatively, using triple-quoted strings allows for newline characters in the string literal itself.
### What version of ibis are you using?
6.1.0
also tested on latest commit: 15f8d9575
### What backend(s) are you using, if any?
BigQuery
### Relevant log output
```sh
BigQuery API: Syntax error: Unclosed string literal at [2:47]
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/base/sql/registry/literal.py`
Content:
```
1 from __future__ import annotations
2
3 import datetime
4 import math
5
6 import ibis.expr.types as ir
7
8
9 def _set_literal_format(translator, expr):
10 value_type = expr.type().value_type
11
12 formatted = [
13 translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value
14 ]
15
16 return "(" + ", ".join(formatted) + ")"
17
18
19 def _boolean_literal_format(translator, op):
20 return "TRUE" if op.value else "FALSE"
21
22
23 def _string_literal_format(translator, op):
24 return "'{}'".format(op.value.replace("'", "\\'"))
25
26
27 def _number_literal_format(translator, op):
28 if math.isfinite(op.value):
29 formatted = repr(op.value)
30 else:
31 if math.isnan(op.value):
32 formatted_val = "NaN"
33 elif math.isinf(op.value):
34 if op.value > 0:
35 formatted_val = "Infinity"
36 else:
37 formatted_val = "-Infinity"
38 formatted = f"CAST({formatted_val!r} AS DOUBLE)"
39
40 return formatted
41
42
43 def _interval_literal_format(translator, op):
44 return f"INTERVAL {op.value} {op.dtype.resolution.upper()}"
45
46
47 def _date_literal_format(translator, op):
48 value = op.value
49 if isinstance(value, datetime.date):
50 value = value.strftime("%Y-%m-%d")
51
52 return repr(value)
53
54
55 def _timestamp_literal_format(translator, op):
56 value = op.value
57 if isinstance(value, datetime.datetime):
58 value = value.isoformat()
59
60 return repr(value)
61
62
63 literal_formatters = {
64 "boolean": _boolean_literal_format,
65 "number": _number_literal_format,
66 "string": _string_literal_format,
67 "interval": _interval_literal_format,
68 "timestamp": _timestamp_literal_format,
69 "date": _date_literal_format,
70 "set": _set_literal_format,
71 }
72
73
74 def literal(translator, op):
75 """Return the expression as its literal value."""
76
77 dtype = op.dtype
78
79 if op.value is None:
80 return "NULL"
81
82 if dtype.is_boolean():
83 typeclass = "boolean"
84 elif dtype.is_string():
85 typeclass = "string"
86 elif dtype.is_date():
87 typeclass = "date"
88 elif dtype.is_numeric():
89 typeclass = "number"
90 elif dtype.is_timestamp():
91 typeclass = "timestamp"
92 elif dtype.is_interval():
93 typeclass = "interval"
94 else:
95 raise NotImplementedError(f"Unsupported type: {dtype!r}")
96
97 return literal_formatters[typeclass](translator, op)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py
--- a/ibis/backends/base/sql/registry/literal.py
+++ b/ibis/backends/base/sql/registry/literal.py
@@ -21,7 +21,22 @@
def _string_literal_format(translator, op):
- return "'{}'".format(op.value.replace("'", "\\'"))
+ return "'{}'".format(
+ op.value
+ # Escape \ first so we don't double escape other characters.
+ .replace("\\", "\\\\")
+ # Escape ' since we're using those for the string literal.
+ .replace("'", "\\'")
+ # ASCII escape sequences that are recognized in Python:
+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+ .replace("\a", "\\a") # Bell
+ .replace("\b", "\\b") # Backspace
+ .replace("\f", "\\f") # Formfeed
+ .replace("\n", "\\n") # Newline / Linefeed
+ .replace("\r", "\\r") # Carriage return
+ .replace("\t", "\\t") # Tab
+ .replace("\v", "\\v") # Vertical tab
+ )
def _number_literal_format(translator, op):
| {"golden_diff": "diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py\n--- a/ibis/backends/base/sql/registry/literal.py\n+++ b/ibis/backends/base/sql/registry/literal.py\n@@ -21,7 +21,22 @@\n \n \n def _string_literal_format(translator, op):\n- return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n+ return \"'{}'\".format(\n+ op.value\n+ # Escape \\ first so we don't double escape other characters.\n+ .replace(\"\\\\\", \"\\\\\\\\\")\n+ # Escape ' since we're using those for the string literal.\n+ .replace(\"'\", \"\\\\'\")\n+ # ASCII escape sequences that are recognized in Python:\n+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n+ .replace(\"\\a\", \"\\\\a\") # Bell\n+ .replace(\"\\b\", \"\\\\b\") # Backspace\n+ .replace(\"\\f\", \"\\\\f\") # Formfeed\n+ .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n+ .replace(\"\\r\", \"\\\\r\") # Carriage return\n+ .replace(\"\\t\", \"\\\\t\") # Tab\n+ .replace(\"\\v\", \"\\\\v\") # Vertical tab\n+ )\n \n \n def _number_literal_format(translator, op):\n", "issue": "bug(bigquery): memtable and string literals not escaping `\\n` or `\\` which results in invalid syntax\n### What happened?\n\nCode:\r\n\r\n```python\r\nimport ibis\r\n\r\nibis_client = ibis.bigquery.connect()\r\ntable = ibis.memtable(\r\n {\r\n \"col1\": [\"a\\tb\\nc\", \"d e f\", \"g'e\\\"h\"],\r\n }\r\n)\r\nprint(ibis_client.compile(table))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\r\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nNote, the following SQL works as expected:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nTherefore, we should really be escaping `\\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? 
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals\r\n\r\nAlternatively, using triple-quoted strings allows for newline characters in the string literal itself.\n\n### What version of ibis are you using?\n\n6.1.0\r\n\r\nalso tested on latest commit: 15f8d9575\n\n### What backend(s) are you using, if any?\n\nBigQuery\n\n### Relevant log output\n\n```sh\nBigQuery API: Syntax error: Unclosed string literal at [2:47]\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(\n op.value\n # Escape \\ first so we don't double escape other characters.\n .replace(\"\\\\\", \"\\\\\\\\\")\n # Escape 
' since we're using those for the string literal.\n .replace(\"'\", \"\\\\'\")\n # ASCII escape sequences that are recognized in Python:\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n .replace(\"\\a\", \"\\\\a\") # Bell\n .replace(\"\\b\", \"\\\\b\") # Backspace\n .replace(\"\\f\", \"\\\\f\") # Formfeed\n .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n .replace(\"\\r\", \"\\\\r\") # Carriage return\n .replace(\"\\t\", \"\\\\t\") # Tab\n .replace(\"\\v\", \"\\\\v\") # Vertical tab\n )\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}]} | 1,397 | 316 |
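The patched `_string_literal_format` chains one `str.replace` call per escape sequence. The same table can be written as a loop, which makes the ordering constraint explicit: the backslash must be escaped before any replacement that introduces new backslashes. A condensed, standalone sketch (function name assumed):

```python
def quote_sql_string(value: str) -> str:
    """Escape a Python string for use as a single-quoted SQL string literal."""
    replacements = [
        ("\\", "\\\\"),  # escape backslashes first so later escapes are not doubled
        ("'", "\\'"),
        ("\a", "\\a"), ("\b", "\\b"), ("\f", "\\f"),
        ("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t"), ("\v", "\\v"),
    ]
    for old, new in replacements:
        value = value.replace(old, new)
    return "'{}'".format(value)

assert quote_sql_string("a\tb\nc") == "'a\\tb\\nc'"  # the column from the issue now survives
```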
gh_patches_debug_18800 | rasdani/github-patches | git_diff | DistrictDataLabs__yellowbrick-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ClassificationScoreVisualizers should return accuracy
See #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yellowbrick/classifier/base.py`
Content:
```
1 # yellowbrick.classifier.base
2 # API for classification visualizer hierarchy.
3 #
4 # Author: Rebecca Bilbro <[email protected]>
5 # Author: Benjamin Bengfort <[email protected]>
6 # Author: Neal Humphrey
7 # Created: Wed May 18 12:39:40 2016 -0400
8 #
9 # Copyright (C) 2016 District Data Labs
10 # For license information, see LICENSE.txt
11 #
12 # ID: base.py [5388065] [email protected] $
13
14 """
15 API for classification visualizer hierarchy.
16 """
17
18 ##########################################################################
19 ## Imports
20 ##########################################################################
21
22 import numpy as np
23
24 from ..utils import isclassifier
25 from ..base import ScoreVisualizer
26 from ..style.palettes import color_palette
27 from ..exceptions import YellowbrickTypeError
28
29
30 ##########################################################################
31 ## Base Classification Visualizer
32 ##########################################################################
33
34 class ClassificationScoreVisualizer(ScoreVisualizer):
35
36 def __init__(self, model, ax=None, classes=None, **kwargs):
37 """
38 Check to see if model is an instance of a classifer.
39 Should return an error if it isn't.
40
41 .. todo:: document this class.
42 .. tood:: accept as input classes as all visualizers need this.
43 """
44 # A bit of type checking
45 if not isclassifier(model):
46 raise YellowbrickTypeError(
47 "This estimator is not a classifier; "
48 "try a regression or clustering score visualizer instead!"
49 )
50
51 # Initialize the super method.
52 super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
53
54 # Convert to array if necessary to match estimator.classes_
55 if classes is not None:
56 classes = np.array(classes)
57
58 # Set up classifier score visualization properties
59 if classes is not None:
60 n_colors = len(classes)
61 else:
62 n_colors = None
63
64 self.colors = color_palette(kwargs.pop('colors', None), n_colors)
65 self.classes_ = classes
66
67 @property
68 def classes_(self):
69 """
70 Proxy property to smartly access the classes from the estimator or
71 stored locally on the score visualizer for visualization.
72 """
73 if self.__classes is None:
74 try:
75 return self.estimator.classes_
76 except AttributeError:
77 return None
78 return self.__classes
79
80 @classes_.setter
81 def classes_(self, value):
82 self.__classes = value
83
84 def fit(self, X, y=None, **kwargs):
85 """
86 Parameters
87 ----------
88
89 X : ndarray or DataFrame of shape n x m
90 A matrix of n instances with m features
91
92 y : ndarray or Series of length n
93 An array or series of target or class values
94
95 kwargs: keyword arguments passed to Scikit-Learn API.
96
97 Returns
98 -------
99 self : instance
100 Returns the instance of the classification score visualizer
101
102 """
103 # Fit the inner estimator
104 self.estimator.fit(X, y)
105
106 # Extract the classes from the estimator
107 if self.classes_ is None:
108 self.classes_ = self.estimator.classes_
109
110 # Always return self from fit
111 return self
112
113 #TODO during refactoring this can be used to generalize ClassBalance
114 def class_counts(self, y):
115 unique, counts = np.unique(y, return_counts=True)
116 return dict(zip(unique, counts))
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py
--- a/yellowbrick/classifier/base.py
+++ b/yellowbrick/classifier/base.py
@@ -110,6 +110,28 @@
# Always return self from fit
return self
+
+ def score(self, X, y, **kwargs):
+ """
+ The score function is the hook for visual interaction. Pass in test
+ data and the visualizer will create predictions on the data and
+ evaluate them with respect to the test values. The evaluation will
+ then be passed to draw() and the result of the estimator score will
+ be returned.
+ Parameters
+ ----------
+ X : array-like
+ X (also X_test) are the dependent variables of test set to predict
+ y : array-like
+ y (also y_test) is the independent actual variables to score against
+ Returns
+ -------
+ score : float
+ """
+ self.score_ = self.estimator.score(X, y, **kwargs)
+
+ return self.score_
+
#TODO during refactoring this can be used to generalize ClassBalance
def class_counts(self, y):
unique, counts = np.unique(y, return_counts=True)
| {"golden_diff": "diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py\n--- a/yellowbrick/classifier/base.py\n+++ b/yellowbrick/classifier/base.py\n@@ -110,6 +110,28 @@\n # Always return self from fit\n return self\n \n+\n+ def score(self, X, y, **kwargs):\n+ \"\"\"\n+ The score function is the hook for visual interaction. Pass in test\n+ data and the visualizer will create predictions on the data and\n+ evaluate them with respect to the test values. The evaluation will\n+ then be passed to draw() and the result of the estimator score will\n+ be returned.\n+ Parameters\n+ ----------\n+ X : array-like\n+ X (also X_test) are the dependent variables of test set to predict\n+ y : array-like\n+ y (also y_test) is the independent actual variables to score against\n+ Returns\n+ -------\n+ score : float\n+ \"\"\"\n+ self.score_ = self.estimator.score(X, y, **kwargs)\n+\n+ return self.score_\n+\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n", "issue": "ClassificationScoreVisualizers should return accuracy\nSee #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure. \n", "before_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}], "after_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n\n def score(self, X, y, **kwargs):\n \"\"\"\n The score function is the hook for visual interaction. Pass in test\n data and the visualizer will create predictions on the data and\n evaluate them with respect to the test values. The evaluation will\n then be passed to draw() and the result of the estimator score will\n be returned.\n Parameters\n ----------\n X : array-like\n X (also X_test) are the dependent variables of test set to predict\n y : array-like\n y (also y_test) is the independent actual variables to score against\n Returns\n -------\n score : float\n \"\"\"\n self.score_ = self.estimator.score(X, y, **kwargs)\n\n return self.score_\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}]} | 1,303 | 286 |
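The essence of the added `score()` hook fits in a few lines: delegate to the wrapped estimator, whose default `score()` for scikit-learn classifiers is mean accuracy, and cache the value on the visualizer. A toy stand-in illustrating the pattern (this class is not part of yellowbrick):

```python
from sklearn.base import is_classifier

class ScoreCachingWrapper:
    """Toy stand-in for ClassificationScoreVisualizer.score() after the patch."""

    def __init__(self, estimator):
        if not is_classifier(estimator):
            raise TypeError("expected a classifier")
        self.estimator = estimator

    def fit(self, X, y, **kwargs):
        self.estimator.fit(X, y)
        return self

    def score(self, X, y, **kwargs):
        # scikit-learn classifiers return mean accuracy from .score()
        self.score_ = self.estimator.score(X, y, **kwargs)
        return self.score_
```

Subclasses that draw F1 or per-class metrics can still call the base implementation via `super().score(...)` and annotate the figure with the returned accuracy, which is what the issue asks for.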
gh_patches_debug_8883 | rasdani/github-patches | git_diff | python-pillow__Pillow-906 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot from PIL import ImageGrab
Does Pillow 2.5.3's ImageGrab still not support any OS other than Windows?
If so, why can't we do that?
---
/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/__init__.py
Python 2.7.5 (default, Mar 9 2014, 22:15:05)
[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from PIL import ImageGrab
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py", line 26, in <module>
ImportError: No module named _grabscreen
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PIL/ImageGrab.py`
Content:
```
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 from PIL import Image
19
20
21 try:
22 # built-in driver (1.1.3 and later)
23 grabber = Image.core.grabscreen
24 except AttributeError:
25 # stand-alone driver (pil plus)
26 import _grabscreen
27 grabber = _grabscreen.grab
28
29
30 def grab(bbox=None):
31 size, data = grabber()
32 im = Image.frombytes(
33 "RGB", size, data,
34 # RGB, 32-bit line padding, origo in lower left corner
35 "raw", "BGR", (size[0]*3 + 3) & -4, -1
36 )
37 if bbox:
38 im = im.crop(bbox)
39 return im
40
41
42 def grabclipboard():
43 debug = 0 # temporary interface
44 data = Image.core.grabclipboard(debug)
45 if isinstance(data, bytes):
46 from PIL import BmpImagePlugin
47 import io
48 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
49 return data
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py
--- a/PIL/ImageGrab.py
+++ b/PIL/ImageGrab.py
@@ -17,6 +17,9 @@
from PIL import Image
+import sys
+if sys.platform != "win32":
+ raise ImportError("ImageGrab is Windows only")
try:
# built-in driver (1.1.3 and later)
@@ -40,7 +43,7 @@
def grabclipboard():
- debug = 0 # temporary interface
+ debug = 0 # temporary interface
data = Image.core.grabclipboard(debug)
if isinstance(data, bytes):
from PIL import BmpImagePlugin
| {"golden_diff": "diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py\n--- a/PIL/ImageGrab.py\n+++ b/PIL/ImageGrab.py\n@@ -17,6 +17,9 @@\n \n from PIL import Image\n \n+import sys\n+if sys.platform != \"win32\":\n+ raise ImportError(\"ImageGrab is Windows only\")\n \n try:\n # built-in driver (1.1.3 and later)\n@@ -40,7 +43,7 @@\n \n \n def grabclipboard():\n- debug = 0 # temporary interface\n+ debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n", "issue": "Cannot from PIL import ImageGrab\nDoes Pillow2.5.3 ImageGrab still not support other OS except windows?\nIf not, why we cannot do that?\n\n---\n\n/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/**init**.py\n\nPython 2.7.5 (default, Mar 9 2014, 22:15:05)\n[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\n> > > from PIL import ImageGrab\n> > > Traceback (most recent call last):\n> > > File \"<stdin>\", line 1, in <module>\n> > > File \"build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py\", line 26, in <module>\n> > > ImportError: No module named _grabscreen\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\nimport sys\nif sys.platform != \"win32\":\n raise ImportError(\"ImageGrab is Windows only\")\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return 
BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}]} | 945 | 156 |
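Because the patch makes `PIL.ImageGrab` raise `ImportError` at import time on anything other than Windows, callers on the Pillow version in this record need to treat the import as optional. A small usage sketch (the wrapper function is an assumption, not Pillow API):

```python
import sys

try:
    from PIL import ImageGrab  # raises ImportError off Windows after this patch
except ImportError:
    ImageGrab = None

def screenshot(bbox=None):
    """Grab the screen where supported, fail with a clear message elsewhere."""
    if ImageGrab is None:
        raise RuntimeError(
            "Screen capture is not available on %s with this Pillow build" % sys.platform
        )
    return ImageGrab.grab(bbox=bbox)
```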
gh_patches_debug_1942 | rasdani/github-patches | git_diff | ocf__ocfweb-72 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "edit this page" link on docs?
It would link to the GitHub editor page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/docs/doc.py`
Content:
```
1 from collections import namedtuple
2
3
4 class Document(namedtuple('Document', ['name', 'title', 'render'])):
5
6 @property
7 def category(self):
8 """Return full category path of the document.
9
10 For example, "/" or "/staff/backend/".
11 """
12 return self.name.rsplit('/', 1)[0] + '/'
13
14 @property
15 def category_for_sidebar(self):
16 """Return the category to show similar pages for in the sidebar.
17
18 If this page isn't at the root category, we just return this page's
19 category.
20
21 If this page is at the root category, we return the category rooted at
22 this page (which may or may not have any pages in it).
23 """
24 if self.category == '/':
25 return self.name + '/'
26 else:
27 return self.category
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py
--- a/ocfweb/docs/doc.py
+++ b/ocfweb/docs/doc.py
@@ -25,3 +25,12 @@
return self.name + '/'
else:
return self.category
+
+ @property
+ def edit_url(self):
+ """Return a GitHub edit URL for this page."""
+ return (
+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +
+ self.name +
+ '.md'
+ )
| {"golden_diff": "diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py\n--- a/ocfweb/docs/doc.py\n+++ b/ocfweb/docs/doc.py\n@@ -25,3 +25,12 @@\n return self.name + '/'\n else:\n return self.category\n+\n+ @property\n+ def edit_url(self):\n+ \"\"\"Return a GitHub edit URL for this page.\"\"\"\n+ return (\n+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n+ self.name +\n+ '.md'\n+ )\n", "issue": "Add \"edit this page\" link on docs?\nIt would link to the GitHub editor page.\n\n", "before_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n", "path": "ocfweb/docs/doc.py"}], "after_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n\n @property\n def edit_url(self):\n \"\"\"Return a GitHub edit URL for this page.\"\"\"\n return (\n 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n self.name +\n '.md'\n )\n", "path": "ocfweb/docs/doc.py"}]} | 502 | 133 |
gh_patches_debug_2911 | rasdani/github-patches | git_diff | iterative__dvc-5067 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc version: does not follow symlinks
# Bug Report
## Description
This is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.
```
DVC version: 1.10.2 (pip)
---------------------------------
Platform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10
Supports: All remotes
Cache types: symlink
Cache directory: nfs4 on storage:/home
Caches: local
Remotes: s3
Workspace directory: nfs4 on storage:/home
Repo: dvc, git
```
However, I do have a `~/.config/dvc/config` file that overrides this:
```
[core]
experiments = true
[cache]
type = "reflink,symlink,copy"
protected = true
dir = /home/jc/ssd_cache/dvc_cache
[feature]
parametrization = true
```
And the actual cache dir is `/home/jc/ssd_cache/dvc_cache` as I've specified instead of `nfs4 on storage:/home`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/info.py`
Content:
```
1 import itertools
2 import os
3 import pathlib
4 import platform
5 import uuid
6
7 from dvc.exceptions import DvcException, NotDvcRepoError
8 from dvc.repo import Repo
9 from dvc.scm.base import SCMError
10 from dvc.system import System
11 from dvc.tree import TREES, get_tree_cls, get_tree_config
12 from dvc.utils import error_link
13 from dvc.utils.pkg import PKG
14 from dvc.version import __version__
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 if PKG is None:
22 package = ""
23 else:
24 package = f"({PKG})"
25
26
27 def get_dvc_info():
28 info = [
29 f"DVC version: {__version__} {package}",
30 "---------------------------------",
31 f"Platform: Python {platform.python_version()} on "
32 f"{platform.platform()}",
33 f"Supports: {_get_supported_remotes()}",
34 ]
35
36 try:
37 repo = Repo()
38
39 # cache_dir might not exist yet (e.g. after `dvc init`), and we
40 # can't auto-create it, as it might cause issues if the user
41 # later decides to enable shared cache mode with
42 # `dvc config cache.shared group`.
43 if os.path.exists(repo.cache.local.cache_dir):
44 info.append(
45 "Cache types: {}".format(_get_linktype_support_info(repo))
46 )
47 if psutil:
48 fs_type = get_fs_type(repo.cache.local.cache_dir)
49 info.append(f"Cache directory: {fs_type}")
50 else:
51 info.append("Cache types: " + error_link("no-dvc-cache"))
52
53 info.append(f"Caches: {_get_caches(repo.cache)}")
54
55 info.append(f"Remotes: {_get_remotes(repo.config)}")
56
57 except NotDvcRepoError:
58 pass
59 except SCMError:
60 info.append("Repo: dvc, git (broken)")
61 else:
62 root_directory = repo.root_dir
63 if psutil:
64 fs_root = get_fs_type(os.path.abspath(root_directory))
65 info.append(f"Workspace directory: {fs_root}")
66 info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
67 return "\n".join(info)
68
69
70 def _get_caches(cache):
71 caches = (
72 cache_type
73 for cache_type, cache_instance in cache.by_scheme()
74 if cache_instance
75 )
76
77 # Caches will be always non-empty including the local cache
78 return ", ".join(caches)
79
80
81 def _get_remotes(config):
82 schemes = (
83 get_tree_cls(get_tree_config(config, name=remote)).scheme
84 for remote in config["remote"]
85 )
86
87 return ", ".join(schemes) or "None"
88
89
90 def _get_linktype_support_info(repo):
91
92 links = {
93 "reflink": (System.reflink, None),
94 "hardlink": (System.hardlink, System.is_hardlink),
95 "symlink": (System.symlink, System.is_symlink),
96 }
97
98 fname = "." + str(uuid.uuid4())
99 src = os.path.join(repo.cache.local.cache_dir, fname)
100 open(src, "w").close()
101 dst = os.path.join(repo.root_dir, fname)
102
103 cache = []
104
105 for name, (link, is_link) in links.items():
106 try:
107 link(src, dst)
108 status = "supported"
109 if is_link and not is_link(dst):
110 status = "broken"
111 os.unlink(dst)
112 except DvcException:
113 status = "not supported"
114
115 if status == "supported":
116 cache.append(name)
117 os.remove(src)
118
119 return ", ".join(cache)
120
121
122 def _get_supported_remotes():
123
124 supported_remotes = []
125 for tree_cls in TREES:
126 if not tree_cls.get_missing_deps():
127 supported_remotes.append(tree_cls.scheme)
128
129 if len(supported_remotes) == len(TREES):
130 return "All remotes"
131
132 if len(supported_remotes) == 1:
133 return supported_remotes
134
135 return ", ".join(supported_remotes)
136
137
138 def get_fs_type(path):
139
140 partition = {
141 pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
142 for part in psutil.disk_partitions(all=True)
143 }
144
145 path = pathlib.Path(path)
146
147 for parent in itertools.chain([path], path.parents):
148 if parent in partition:
149 return partition[parent]
150 return ("unknown", "none")
151
152
153 def _get_dvc_repo_info(self):
154 if self.config.get("core", {}).get("no_scm", False):
155 return "dvc (no_scm)"
156
157 if self.root_dir != self.scm.root_dir:
158 return "dvc (subdir), git"
159
160 return "dvc, git"
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/info.py b/dvc/info.py
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -142,7 +142,8 @@
for part in psutil.disk_partitions(all=True)
}
- path = pathlib.Path(path)
+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065
+ path = pathlib.Path(path).resolve()
for parent in itertools.chain([path], path.parents):
if parent in partition:
| {"golden_diff": "diff --git a/dvc/info.py b/dvc/info.py\n--- a/dvc/info.py\n+++ b/dvc/info.py\n@@ -142,7 +142,8 @@\n for part in psutil.disk_partitions(all=True)\n }\n \n- path = pathlib.Path(path)\n+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n+ path = pathlib.Path(path).resolve()\n \n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n", "issue": "dvc version: does not follow symlinks\n# Bug Report\r\n\r\n## Description\r\n\r\nThis is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.\r\n\r\n```\r\nDVC version: 1.10.2 (pip)\r\n---------------------------------\r\nPlatform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10\r\nSupports: All remotes\r\nCache types: symlink\r\nCache directory: nfs4 on storage:/home\r\nCaches: local\r\nRemotes: s3\r\nWorkspace directory: nfs4 on storage:/home\r\nRepo: dvc, git\r\n```\r\n\r\nHowever, I do have a `~/.config/dvc/config` file that overrides this:\r\n\r\n```\r\n[core]\r\n experiments = true\r\n[cache]\r\n type = \"reflink,symlink,copy\"\r\n protected = true\r\n dir = /home/jc/ssd_cache/dvc_cache\r\n[feature]\r\n parametrization = true\r\n```\r\n\r\nAnd the actual cache dir is `/home/jc/ssd_cache/dvc_cache` as I've specified instead of `nfs4 on storage:/home`\n", "before_files": [{"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}], "after_files": [{"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = 
f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n path = pathlib.Path(path).resolve()\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}]} | 1,939 | 118 |
gh_patches_debug_15501 | rasdani/github-patches | git_diff | wagtail__wagtail-8708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Revision model rename breaks Page revision foreign key on SQLite
### Issue Summary
#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).
On my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration starts with:
```
% ./manage.py sqlmigrate wagtailcore 0070
BEGIN;
--
-- Rename model PageRevision to Revision
--
ALTER TABLE "wagtailcore_pagerevision" RENAME TO "wagtailcore_revision";
...
```
But if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.
```
% sqlite3 db.sqlite3
SQLite version 3.32.3 2020-06-18 14:16:19
Enter ".help" for usage hints.
sqlite> PRAGMA foreign_key_list('wagtailcore_page');
...
3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE
...
```
It looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):
> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.
My `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):
```
sqlite> PRAGMA legacy_alter_table;
1
sqlite> PRAGMA foreign_keys;
0
```
I note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.
Wagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.
### Steps to Reproduce
1. Start a new project with `wagtail start myproject`
2. `cd myproject`
3. `./manage.py migrate`
4. `./manage.py createsuperuser` and create an admin user.
5. `./manage.py runserver`
6. Visit http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)
7. Fill in the title field with something like "Test". Click "Save Draft". You'll get an error: "OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision"
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Mac OS version: Big Sur 11.6.5
- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl
- Python version: 3.10.4
- Django version: 4.0.5, also happens on 3.2
- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/migrations/0070_rename_pagerevision_revision.py`
Content:
```
1 # Generated by Django 4.0.3 on 2022-04-26 12:31
2
3 from django.conf import settings
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 migrations.swappable_dependency(settings.AUTH_USER_MODEL),
11 ("wagtailcore", "0069_log_entry_jsonfield"),
12 ]
13
14 operations = [
15 migrations.RenameModel(
16 old_name="PageRevision",
17 new_name="Revision",
18 ),
19 migrations.AlterModelOptions(
20 name="revision",
21 options={"verbose_name": "revision", "verbose_name_plural": "revisions"},
22 ),
23 migrations.AlterField(
24 model_name="revision",
25 name="page",
26 field=models.CharField(max_length=255, verbose_name="object id"),
27 ),
28 migrations.RenameField(
29 model_name="revision",
30 old_name="page",
31 new_name="object_id",
32 ),
33 migrations.AddField(
34 model_name="revision",
35 name="content_type",
36 field=models.ForeignKey(
37 null=True,
38 on_delete=models.CASCADE,
39 related_name="+",
40 to="contenttypes.contenttype",
41 ),
42 ),
43 migrations.AddField(
44 model_name="revision",
45 name="base_content_type",
46 field=models.ForeignKey(
47 null=True,
48 on_delete=models.CASCADE,
49 related_name="+",
50 to="contenttypes.contenttype",
51 ),
52 ),
53 migrations.AddIndex(
54 model_name="revision",
55 index=models.Index(
56 fields=["content_type", "object_id"],
57 name="content_object_idx",
58 ),
59 ),
60 migrations.AddIndex(
61 model_name="revision",
62 index=models.Index(
63 fields=["base_content_type", "object_id"],
64 name="base_content_object_idx",
65 ),
66 ),
67 ]
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py
--- a/wagtail/migrations/0070_rename_pagerevision_revision.py
+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py
@@ -4,6 +4,12 @@
from django.db import migrations, models
+def disable_sqlite_legacy_alter_table(apps, schema_editor):
+ # Fix for https://github.com/wagtail/wagtail/issues/8635
+ if schema_editor.connection.vendor == "sqlite":
+ schema_editor.execute("PRAGMA legacy_alter_table = OFF")
+
+
class Migration(migrations.Migration):
dependencies = [
@@ -12,6 +18,10 @@
]
operations = [
+ migrations.RunPython(
+ disable_sqlite_legacy_alter_table,
+ migrations.RunPython.noop,
+ ),
migrations.RenameModel(
old_name="PageRevision",
new_name="Revision",
| {"golden_diff": "diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py\n--- a/wagtail/migrations/0070_rename_pagerevision_revision.py\n+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py\n@@ -4,6 +4,12 @@\n from django.db import migrations, models\n \n \n+def disable_sqlite_legacy_alter_table(apps, schema_editor):\n+ # Fix for https://github.com/wagtail/wagtail/issues/8635\n+ if schema_editor.connection.vendor == \"sqlite\":\n+ schema_editor.execute(\"PRAGMA legacy_alter_table = OFF\")\n+\n+\n class Migration(migrations.Migration):\n \n dependencies = [\n@@ -12,6 +18,10 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ disable_sqlite_legacy_alter_table,\n+ migrations.RunPython.noop,\n+ ),\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n", "issue": "Revision model rename breaks Page revision foreign key on SQLite\n### Issue Summary\r\n\r\n#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).\r\n\r\nOn my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration starts with:\r\n\r\n```\r\n% ./manage.py sqlmigrate wagtailcore 0070\r\nBEGIN;\r\n--\r\n-- Rename model PageRevision to Revision\r\n--\r\nALTER TABLE \"wagtailcore_pagerevision\" RENAME TO \"wagtailcore_revision\";\r\n...\r\n```\r\n\r\nBut if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.\r\n\r\n```\r\n% sqlite3 db.sqlite3 \r\nSQLite version 3.32.3 2020-06-18 14:16:19\r\nEnter \".help\" for usage hints.\r\nsqlite> PRAGMA foreign_key_list('wagtailcore_page');\r\n...\r\n3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE\r\n...\r\n```\r\n\r\nIt looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):\r\n\r\n> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.\r\n\r\nMy `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):\r\n\r\n```\r\nsqlite> PRAGMA legacy_alter_table;\r\n1\r\nsqlite> PRAGMA foreign_keys;\r\n0\r\n```\r\n\r\nI note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.\r\n\r\nWagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. `cd myproject`\r\n3. `./manage.py migrate`\r\n4. 
`./manage.py createsuperuser` and create an admin user.\r\n5. `./manage.py runserver`\r\n6. Vist http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)\r\n7. Fill in the title field with something like \"Test\". Click \"Save Draft\". You'll get an error: \"OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision\"\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Mac OS version: Big Sur 11.6.5\r\n- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl\r\n- Python version: 3.10.4\r\n- Django version: 4.0.5, also happens on 3.2\r\n- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)\r\n\n", "before_files": [{"content": "# Generated by Django 4.0.3 on 2022-04-26 12:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n (\"wagtailcore\", \"0069_log_entry_jsonfield\"),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n ),\n migrations.AlterModelOptions(\n name=\"revision\",\n options={\"verbose_name\": \"revision\", \"verbose_name_plural\": \"revisions\"},\n ),\n migrations.AlterField(\n model_name=\"revision\",\n name=\"page\",\n field=models.CharField(max_length=255, verbose_name=\"object id\"),\n ),\n migrations.RenameField(\n model_name=\"revision\",\n old_name=\"page\",\n new_name=\"object_id\",\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"base_content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"content_type\", \"object_id\"],\n name=\"content_object_idx\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"base_content_type\", \"object_id\"],\n name=\"base_content_object_idx\",\n ),\n ),\n ]\n", "path": "wagtail/migrations/0070_rename_pagerevision_revision.py"}], "after_files": [{"content": "# Generated by Django 4.0.3 on 2022-04-26 12:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\ndef disable_sqlite_legacy_alter_table(apps, schema_editor):\n # Fix for https://github.com/wagtail/wagtail/issues/8635\n if schema_editor.connection.vendor == \"sqlite\":\n schema_editor.execute(\"PRAGMA legacy_alter_table = OFF\")\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n (\"wagtailcore\", \"0069_log_entry_jsonfield\"),\n ]\n\n operations = [\n migrations.RunPython(\n disable_sqlite_legacy_alter_table,\n migrations.RunPython.noop,\n ),\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n ),\n migrations.AlterModelOptions(\n name=\"revision\",\n options={\"verbose_name\": \"revision\", \"verbose_name_plural\": \"revisions\"},\n ),\n migrations.AlterField(\n model_name=\"revision\",\n name=\"page\",\n field=models.CharField(max_length=255, 
verbose_name=\"object id\"),\n ),\n migrations.RenameField(\n model_name=\"revision\",\n old_name=\"page\",\n new_name=\"object_id\",\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"base_content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"content_type\", \"object_id\"],\n name=\"content_object_idx\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"base_content_type\", \"object_id\"],\n name=\"base_content_object_idx\",\n ),\n ),\n ]\n", "path": "wagtail/migrations/0070_rename_pagerevision_revision.py"}]} | 1,827 | 239 |
gh_patches_debug_11462 | rasdani/github-patches | git_diff | sublimelsp__LSP-772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Completion inputs label instead of textEdit newText
scalameta/metals#1031 was recently merged, which adds an "implement all members" completion option. However, in Sublime it does not seem to show up in the same order in the completions as in other editors; it seems to be triggered by `e` for some reason. Apart from that, if you do decide to use that completion, it inserts the `label` instead of the `newText`.
I'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`
Here is a gif illustrating what I'm talking about

And here is the snippet of lsp json that shows the completion item
```
[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'
Params: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. Processing request took 1ms
Result: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
```
If I can provide any more details, just let me know!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/completion.py`
Content:
```
1 from .protocol import CompletionItemKind, Range
2 from .types import Settings
3 from .logging import debug
4 try:
5 from typing import Tuple, Optional, Dict, List, Union
6 assert Tuple and Optional and Dict and List and Union and Settings
7 except ImportError:
8 pass
9
10
11 completion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}
12
13
14 def get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':
15 # choose hint based on availability and user preference
16 hint = None
17 if settings.completion_hint_type == "auto":
18 hint = item.get("detail")
19 if not hint:
20 kind = item.get("kind")
21 if kind:
22 hint = completion_item_kind_names[kind]
23 elif settings.completion_hint_type == "detail":
24 hint = item.get("detail")
25 elif settings.completion_hint_type == "kind":
26 kind = item.get("kind")
27 if kind:
28 hint = completion_item_kind_names.get(kind)
29 return hint
30
31
32 def format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':
33 # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.
34 if settings.prefer_label_over_filter_text:
35 trigger = item["label"]
36 else:
37 trigger = item.get("filterText") or item["label"]
38
39 hint = get_completion_hint(item, settings)
40
41 # label is an alternative for insertText if neither textEdit nor insertText is provided
42 replacement = text_edit_text(item, word_col) or item.get("insertText") or trigger
43
44 if replacement[0] != trigger[0]:
45 # fix some common cases when server sends different start on label and replacement.
46 if replacement[0] == '$':
47 trigger = '$' + trigger # add missing $
48 elif replacement[0] == '-':
49 trigger = '-' + trigger # add missing -
50 elif trigger[0] == ':':
51 replacement = ':' + replacement # add missing :
52 elif trigger[0] == '$':
53 trigger = trigger[1:] # remove leading $
54 elif trigger[0] == ' ' or trigger[0] == '•':
55 trigger = trigger[1:] # remove clangd insertion indicator
56 else:
57 debug("replacement prefix does not match trigger!")
58 replacement = item.get("insertText") or trigger
59
60 if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
61 replacement = '\\$' + replacement[1:]
62 # only return trigger with a hint if available
63 return "\t ".join((trigger, hint)) if hint else trigger, replacement
64
65
66 def text_edit_text(item: dict, word_col: int) -> 'Optional[str]':
67 text_edit = item.get('textEdit')
68 if text_edit:
69 edit_range, edit_text = text_edit.get("range"), text_edit.get("newText")
70 if edit_range and edit_text:
71 edit_range = Range.from_lsp(edit_range)
72
73 # debug('textEdit from col {}, {} applied at col {}'.format(
74 # edit_range.start.col, edit_range.end.col, word_col))
75
76 if edit_range.start.col <= word_col:
77 # if edit starts at current word, we can use it.
78 # if edit starts before current word, use the whole thing and we'll fix it up later.
79 return edit_text
80
81 return None
82
83
84 def parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':
85 items = [] # type: List[Dict]
86 is_incomplete = False
87 if isinstance(response, dict):
88 items = response["items"] or []
89 is_incomplete = response.get("isIncomplete", False)
90 elif isinstance(response, list):
91 items = response
92 items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
93 return items, is_incomplete
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/completion.py b/plugin/core/completion.py
--- a/plugin/core/completion.py
+++ b/plugin/core/completion.py
@@ -54,8 +54,7 @@
elif trigger[0] == ' ' or trigger[0] == '•':
trigger = trigger[1:] # remove clangd insertion indicator
else:
- debug("replacement prefix does not match trigger!")
- replacement = item.get("insertText") or trigger
+ debug("WARNING: Replacement prefix does not match trigger '{}'".format(trigger))
if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
replacement = '\\$' + replacement[1:]
| {"golden_diff": "diff --git a/plugin/core/completion.py b/plugin/core/completion.py\n--- a/plugin/core/completion.py\n+++ b/plugin/core/completion.py\n@@ -54,8 +54,7 @@\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n- debug(\"replacement prefix does not match trigger!\")\n- replacement = item.get(\"insertText\") or trigger\n+ debug(\"WARNING: Replacement prefix does not match trigger '{}'\".format(trigger))\n \n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n", "issue": "Completion inputs label instead of textEdit newText\nscalameta/metals#1031 was recently merged which adds an \"implement all members\" completion option. However, in Sublime it seems to not show up in the same order in the completions as the other editors. It seems to be triggered by e for some reason. Apart from that, if you do decide to use that completion, the completion seems to instead of implementing the `newText` it implements the `label`.\r\n\r\nI'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`\r\n\r\nHere is a gif illustrating what I'm talking about\r\n\r\n\r\n\r\nAnd here is the snippet of lsp json that shows the completion item\r\n\r\n```\r\n[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'\r\nParams: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n\r\n\r\n[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. 
Processing request took 1ms\r\nResult: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n```\r\n\r\nIf I can provide any more details, just let me know!\n", "before_files": [{"content": "from .protocol import CompletionItemKind, Range\nfrom .types import Settings\nfrom .logging import debug\ntry:\n from typing import Tuple, Optional, Dict, List, Union\n assert Tuple and Optional and Dict and List and Union and Settings\nexcept ImportError:\n pass\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\ndef get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':\n # choose hint based on availability and user preference\n hint = None\n if settings.completion_hint_type == \"auto\":\n hint = item.get(\"detail\")\n if not hint:\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names[kind]\n elif settings.completion_hint_type == \"detail\":\n hint = item.get(\"detail\")\n elif settings.completion_hint_type == \"kind\":\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names.get(kind)\n return hint\n\n\ndef format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n if settings.prefer_label_over_filter_text:\n trigger = item[\"label\"]\n else:\n trigger = item.get(\"filterText\") or item[\"label\"]\n\n hint = get_completion_hint(item, settings)\n\n # label is an alternative for insertText if neither textEdit nor insertText is provided\n replacement = text_edit_text(item, word_col) or item.get(\"insertText\") or trigger\n\n if replacement[0] != trigger[0]:\n # fix some common cases when server sends different start on label and replacement.\n if replacement[0] == '$':\n trigger = '$' + trigger # add missing $\n elif replacement[0] == '-':\n trigger = '-' + trigger # add missing -\n elif trigger[0] == ':':\n replacement = ':' + replacement # add missing :\n elif trigger[0] == '$':\n trigger = trigger[1:] # remove leading $\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n debug(\"replacement prefix does not match trigger!\")\n replacement = item.get(\"insertText\") or trigger\n\n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n # only return trigger with a hint if available\n return \"\\t \".join((trigger, hint)) if hint else trigger, replacement\n\n\ndef text_edit_text(item: dict, word_col: int) -> 'Optional[str]':\n text_edit = item.get('textEdit')\n if text_edit:\n edit_range, edit_text = text_edit.get(\"range\"), text_edit.get(\"newText\")\n if edit_range and edit_text:\n edit_range = Range.from_lsp(edit_range)\n\n # debug('textEdit from col {}, {} applied at col {}'.format(\n # edit_range.start.col, edit_range.end.col, word_col))\n\n if edit_range.start.col <= word_col:\n # if edit starts at current word, we can use it.\n # if edit 
starts before current word, use the whole thing and we'll fix it up later.\n return edit_text\n\n return None\n\n\ndef parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':\n items = [] # type: List[Dict]\n is_incomplete = False\n if isinstance(response, dict):\n items = response[\"items\"] or []\n is_incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n items = response\n items = sorted(items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n return items, is_incomplete\n", "path": "plugin/core/completion.py"}], "after_files": [{"content": "from .protocol import CompletionItemKind, Range\nfrom .types import Settings\nfrom .logging import debug\ntry:\n from typing import Tuple, Optional, Dict, List, Union\n assert Tuple and Optional and Dict and List and Union and Settings\nexcept ImportError:\n pass\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\ndef get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':\n # choose hint based on availability and user preference\n hint = None\n if settings.completion_hint_type == \"auto\":\n hint = item.get(\"detail\")\n if not hint:\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names[kind]\n elif settings.completion_hint_type == \"detail\":\n hint = item.get(\"detail\")\n elif settings.completion_hint_type == \"kind\":\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names.get(kind)\n return hint\n\n\ndef format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n if settings.prefer_label_over_filter_text:\n trigger = item[\"label\"]\n else:\n trigger = item.get(\"filterText\") or item[\"label\"]\n\n hint = get_completion_hint(item, settings)\n\n # label is an alternative for insertText if neither textEdit nor insertText is provided\n replacement = text_edit_text(item, word_col) or item.get(\"insertText\") or trigger\n\n if replacement[0] != trigger[0]:\n # fix some common cases when server sends different start on label and replacement.\n if replacement[0] == '$':\n trigger = '$' + trigger # add missing $\n elif replacement[0] == '-':\n trigger = '-' + trigger # add missing -\n elif trigger[0] == ':':\n replacement = ':' + replacement # add missing :\n elif trigger[0] == '$':\n trigger = trigger[1:] # remove leading $\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n debug(\"WARNING: Replacement prefix does not match trigger '{}'\".format(trigger))\n\n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n # only return trigger with a hint if available\n return \"\\t \".join((trigger, hint)) if hint else trigger, replacement\n\n\ndef text_edit_text(item: dict, word_col: int) -> 'Optional[str]':\n text_edit = item.get('textEdit')\n if text_edit:\n edit_range, edit_text = text_edit.get(\"range\"), text_edit.get(\"newText\")\n if edit_range and edit_text:\n edit_range = Range.from_lsp(edit_range)\n\n # debug('textEdit from col {}, {} applied at col {}'.format(\n # edit_range.start.col, edit_range.end.col, word_col))\n\n if edit_range.start.col <= word_col:\n # if edit starts at current word, we can use it.\n # if edit starts before current word, use the whole thing and we'll fix it up later.\n 
return edit_text\n\n return None\n\n\ndef parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':\n items = [] # type: List[Dict]\n is_incomplete = False\n if isinstance(response, dict):\n items = response[\"items\"] or []\n is_incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n items = response\n items = sorted(items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n return items, is_incomplete\n", "path": "plugin/core/completion.py"}]} | 1,998 | 159 |
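Using the completion item from the issue's trace, the precedence the fix preserves — `textEdit.newText` over `insertText` over `label` — can be sketched in a few lines. The dict below is trimmed from the logged item and the helper name is made up:

```python
item = {
    "label": "Implement all members",
    "filterText": "e",
    "insertTextFormat": 2,
    "textEdit": {
        "range": {"start": {"line": 9, "character": 3},
                  "end": {"line": 9, "character": 4}},
        "newText": "def foo: Int = ${0:???}\n  def boo: Int = ${0:???}",
    },
}


def pick_replacement(item):
    # Prefer the server-provided textEdit text; never fall back to the
    # label once a textEdit is present, which is what the patch ensures.
    text_edit = item.get("textEdit") or {}
    return text_edit.get("newText") or item.get("insertText") or item["label"]


print(pick_replacement(item))  # prints the snippet body, not "Implement all members"
```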
gh_patches_debug_29372 | rasdani/github-patches | git_diff | conda__conda-6752 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda is broken if your home directory is read-only
Conda currently requires the user's home directory to be writable.
If the directory conda is installed into is writable (say a tmpfs) then you can get a long way by using
```shell
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f
conda config --system --set always_yes yes
conda config --system --set changeps1 no
conda config --system --add envs_dirs $CONDA_DIR/envs
conda config --system --add pkgs_dirs $CONDA_DIR/pkgs
```
However, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18
```python
USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
```
I'm not sure if this would even work on Windows?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/core/envs_manager.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from logging import getLogger
5 from os import listdir
6 from os.path import dirname, isdir, isfile, join, normpath, split as path_split
7
8 from ..base.constants import ROOT_ENV_NAME
9 from ..base.context import context
10 from ..common.compat import ensure_text_type, on_win, open
11 from ..common.path import expand, paths_equal
12 from ..gateways.disk.read import yield_lines
13 from ..gateways.disk.test import is_conda_environment
14
15 log = getLogger(__name__)
16
17
18 USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
19
20
21 def register_env(location):
22 location = normpath(location)
23
24 if "placehold_pl" in location:
25 # Don't record envs created by conda-build.
26 return
27
28 if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):
29 # Nothing to do. Location is already recorded in a known environments.txt file.
30 return
31
32 with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
33 fh.write(ensure_text_type(location))
34 fh.write('\n')
35
36
37 def unregister_env(location):
38 if isdir(location):
39 meta_dir = join(location, 'conda-meta')
40 if isdir(meta_dir):
41 meta_dir_contents = listdir(meta_dir)
42 if len(meta_dir_contents) > 1:
43 # if there are any files left other than 'conda-meta/history'
44 # then don't unregister
45 return
46
47 _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)
48
49
50 def list_all_known_prefixes():
51 all_env_paths = set()
52 if on_win:
53 home_dir_dir = dirname(expand('~'))
54 for home_dir in listdir(home_dir_dir):
55 environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')
56 if isfile(environments_txt_file):
57 all_env_paths.update(_clean_environments_txt(environments_txt_file))
58 else:
59 from os import geteuid
60 from pwd import getpwall
61 if geteuid() == 0:
62 search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
63 else:
64 search_dirs = (expand('~'),)
65 for home_dir in search_dirs:
66 environments_txt_file = join(home_dir, '.conda', 'environments.txt')
67 if isfile(environments_txt_file):
68 all_env_paths.update(_clean_environments_txt(environments_txt_file))
69
70 # in case environments.txt files aren't complete, also add all known conda environments in
71 # all envs_dirs
72 envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
73 all_env_paths.update(path for path in (
74 join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)
75 ) if path not in all_env_paths and is_conda_environment(path))
76
77 all_env_paths.add(context.root_prefix)
78 return sorted(all_env_paths)
79
80
81 def env_name(prefix):
82 if not prefix:
83 return None
84 if paths_equal(prefix, context.root_prefix):
85 return ROOT_ENV_NAME
86 maybe_envs_dir, maybe_name = path_split(prefix)
87 for envs_dir in context.envs_dirs:
88 if paths_equal(envs_dir, maybe_envs_dir):
89 return maybe_name
90 return prefix
91
92
93 def _clean_environments_txt(environments_txt_file, remove_location=None):
94 if not isfile(environments_txt_file):
95 return ()
96
97 if remove_location:
98 remove_location = normpath(remove_location)
99 environments_txt_lines = tuple(yield_lines(environments_txt_file))
100 environments_txt_lines_cleaned = tuple(
101 prefix for prefix in environments_txt_lines
102 if prefix != remove_location and is_conda_environment(prefix)
103 )
104 if environments_txt_lines_cleaned != environments_txt_lines:
105 _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)
106 return environments_txt_lines_cleaned
107
108
109 def _rewrite_environments_txt(environments_txt_file, prefixes):
110 try:
111 with open(environments_txt_file, 'w') as fh:
112 fh.write('\n'.join(prefixes))
113 fh.write('\n')
114 except (IOError, OSError) as e:
115 log.info("File not cleaned: %s", environments_txt_file)
116 log.debug('%r', e, exc_info=True)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py
--- a/conda/core/envs_manager.py
+++ b/conda/core/envs_manager.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
+from errno import EACCES
from logging import getLogger
from os import listdir
from os.path import dirname, isdir, isfile, join, normpath, split as path_split
@@ -29,9 +30,17 @@
# Nothing to do. Location is already recorded in a known environments.txt file.
return
- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
- fh.write(ensure_text_type(location))
- fh.write('\n')
+ try:
+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
+ fh.write(ensure_text_type(location))
+ fh.write('\n')
+ except EnvironmentError as e:
+ if e.errno == EACCES:
+ log.warn("Unable to register environment. Path not writable.\n"
+ " environment location: %s\n"
+ " registry file: %s", location, USER_ENVIRONMENTS_TXT_FILE)
+ else:
+ raise
def unregister_env(location):
@@ -111,6 +120,6 @@
with open(environments_txt_file, 'w') as fh:
fh.write('\n'.join(prefixes))
fh.write('\n')
- except (IOError, OSError) as e:
+ except EnvironmentError as e:
log.info("File not cleaned: %s", environments_txt_file)
log.debug('%r', e, exc_info=True)
| {"golden_diff": "diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py\n--- a/conda/core/envs_manager.py\n+++ b/conda/core/envs_manager.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from errno import EACCES\n from logging import getLogger\n from os import listdir\n from os.path import dirname, isdir, isfile, join, normpath, split as path_split\n@@ -29,9 +30,17 @@\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n \n- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n- fh.write(ensure_text_type(location))\n- fh.write('\\n')\n+ try:\n+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n+ fh.write(ensure_text_type(location))\n+ fh.write('\\n')\n+ except EnvironmentError as e:\n+ if e.errno == EACCES:\n+ log.warn(\"Unable to register environment. Path not writable.\\n\"\n+ \" environment location: %s\\n\"\n+ \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n+ else:\n+ raise\n \n \n def unregister_env(location):\n@@ -111,6 +120,6 @@\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n- except (IOError, OSError) as e:\n+ except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "issue": "conda is broken if your home directory is read-only\nConda currently requires the user's home directory to be writable.\r\n\r\nIf the directory conda is installed into is writable (say a tmpfs) then you can get along way by using \r\n```shell\r\n\t\t./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f\r\n\t\tconda config --system --set always_yes yes\r\n\t\tconda config --system --set changeps1 no\r\n\t\tconda config --system --add envs_dirs $CONDA_DIR/envs\r\n\t\tconda config --system --add pkgs_dirs $CONDA_DIR/pkgs\r\n```\r\n\r\nHowever, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18\r\n\r\n```python\r\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\r\n```\r\n\r\nI'm not sure if this would even work on Windows?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. 
Location is already recorded in a known environments.txt file.\n return\n\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except (IOError, OSError) as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom errno import EACCES\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom 
..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n\n try:\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n except EnvironmentError as e:\n if e.errno == EACCES:\n log.warn(\"Unable to register environment. Path not writable.\\n\"\n \" environment location: %s\\n\"\n \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n else:\n raise\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", 
environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}]} | 1,718 | 390 |
gh_patches_debug_17939 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1030 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pragma no cache for path graph json
As in https://github.com/makinacorpus/django-mapentity/pull/48
Related https://github.com/makinacorpus/Geotrek/issues/1026
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/core/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 from django.http import HttpResponse
4 from django.utils.decorators import method_decorator
5 from django.contrib.auth.decorators import login_required
6 from django.views.decorators.http import last_modified as cache_last_modified
7 from django.views.generic.edit import BaseDetailView
8 from django.core.cache import get_cache
9 from django.shortcuts import redirect
10
11 from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
12 MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,
13 MapEntityDelete, MapEntityFormat,
14 JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)
15
16 from geotrek.authent.decorators import path_manager_required, same_structure_required
17
18 from .models import Path, Trail
19 from .forms import PathForm
20 from .filters import PathFilter
21 from . import graph as graph_lib
22
23
24 @login_required
25 def last_list(request):
26 last = request.session.get('last_list') # set in MapEntityList
27 if not last:
28 return redirect('core:path_list')
29 return redirect(last)
30
31 home = last_list
32
33
34 class HttpSVGResponse(HttpResponse):
35 content_type = 'image/svg+xml'
36 def __init__(self, content='', **kwargs):
37 kwargs['content_type'] = self.content_type
38 super(HttpSVGResponse, self).__init__(content, **kwargs)
39
40
41 class ElevationChart(LastModifiedMixin, BaseDetailView):
42
43 @method_decorator(login_required)
44 def dispatch(self, *args, **kwargs):
45 return super(ElevationChart, self).dispatch(*args, **kwargs)
46
47 def render_to_response(self, context, **response_kwargs):
48 return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),
49 **response_kwargs)
50
51
52 class ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
53 """Extract elevation profile from a path and return it as JSON"""
54
55 @method_decorator(login_required)
56 def dispatch(self, *args, **kwargs):
57 return super(ElevationProfile, self).dispatch(*args, **kwargs)
58
59 def get_context_data(self, **kwargs):
60 """
61 Put elevation profile into response context.
62 """
63 obj = self.get_object()
64 data = {}
65 # Formatted as distance, elevation, [lng, lat]
66 for step in obj.get_elevation_profile():
67 formatted = step[0], step[3], step[1:3]
68 data.setdefault('profile', []).append(formatted)
69 return data
70
71
72 class ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
73 """Extract elevation profile on an area and return it as JSON"""
74
75 @method_decorator(login_required)
76 def dispatch(self, *args, **kwargs):
77 return super(ElevationArea, self).dispatch(*args, **kwargs)
78
79 def get_context_data(self, **kwargs):
80 obj = self.get_object()
81 return obj.get_elevation_area()
82
83
84 class PathLayer(MapEntityLayer):
85 model = Path
86 properties = ['name']
87
88
89 class PathList(MapEntityList):
90 queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')
91 filterform = PathFilter
92 columns = ['id', 'name', 'networks', 'stake', 'trail']
93
94
95 class PathJsonList(MapEntityJsonList, PathList):
96 pass
97
98
99 class PathFormatList(MapEntityFormat, PathList):
100 pass
101
102
103 class PathDetail(MapEntityDetail):
104 model = Path
105
106 def can_edit(self):
107 return self.request.user.is_superuser or \
108 (hasattr(self.request.user, 'profile') and \
109 self.request.user.profile.is_path_manager and \
110 self.get_object().same_structure(self.request.user))
111
112
113 class PathDocument(MapEntityDocument):
114 model = Path
115
116 def get_context_data(self, *args, **kwargs):
117 self.get_object().prepare_elevation_chart(self.request)
118 return super(PathDocument, self).get_context_data(*args, **kwargs)
119
120
121 class PathCreate(MapEntityCreate):
122 model = Path
123 form_class = PathForm
124
125 @method_decorator(path_manager_required('core:path_list'))
126 def dispatch(self, *args, **kwargs):
127 return super(PathCreate, self).dispatch(*args, **kwargs)
128
129
130 class PathUpdate(MapEntityUpdate):
131 model = Path
132 form_class = PathForm
133
134 @method_decorator(path_manager_required('core:path_detail'))
135 @same_structure_required('core:path_detail')
136 def dispatch(self, *args, **kwargs):
137 return super(PathUpdate, self).dispatch(*args, **kwargs)
138
139
140 class PathDelete(MapEntityDelete):
141 model = Path
142
143 @method_decorator(path_manager_required('core:path_detail'))
144 @same_structure_required('core:path_detail')
145 def dispatch(self, *args, **kwargs):
146 return super(PathDelete, self).dispatch(*args, **kwargs)
147
148
149 @login_required
150 @cache_last_modified(lambda x: Path.latest_updated())
151 def get_graph_json(request):
152 cache = get_cache('fat')
153 key = 'path_graph_json'
154
155 result = cache.get(key)
156 latest = Path.latest_updated()
157
158 if result and latest:
159 cache_latest, json_graph = result
160 # Not empty and still valid
161 if cache_latest and cache_latest >= latest:
162 return HttpJSONResponse(json_graph)
163
164 # cache does not exist or is not up to date
165 # rebuild the graph and cache the json
166 graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())
167 json_graph = json.dumps(graph)
168
169 cache.set(key, (latest, json_graph))
170 return HttpJSONResponse(json_graph)
171
172
173 class TrailDetail(MapEntityDetail):
174 model = Trail
175
176 def can_edit(self):
177 return False
178
179
180 class TrailDocument(MapEntityDocument):
181 model = Trail
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/core/views.py b/geotrek/core/views.py
--- a/geotrek/core/views.py
+++ b/geotrek/core/views.py
@@ -4,6 +4,7 @@
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified as cache_last_modified
+from django.views.decorators.cache import never_cache as force_cache_validation
from django.views.generic.edit import BaseDetailView
from django.core.cache import get_cache
from django.shortcuts import redirect
@@ -148,6 +149,7 @@
@login_required
@cache_last_modified(lambda x: Path.latest_updated())
+@force_cache_validation
def get_graph_json(request):
cache = get_cache('fat')
key = 'path_graph_json'
| {"golden_diff": "diff --git a/geotrek/core/views.py b/geotrek/core/views.py\n--- a/geotrek/core/views.py\n+++ b/geotrek/core/views.py\n@@ -4,6 +4,7 @@\n from django.utils.decorators import method_decorator\n from django.contrib.auth.decorators import login_required\n from django.views.decorators.http import last_modified as cache_last_modified\n+from django.views.decorators.cache import never_cache as force_cache_validation\n from django.views.generic.edit import BaseDetailView\n from django.core.cache import get_cache\n from django.shortcuts import redirect\n@@ -148,6 +149,7 @@\n \n @login_required\n @cache_last_modified(lambda x: Path.latest_updated())\n+@force_cache_validation\n def get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n", "issue": "Pragma no cache for path graph json\nAs in https://github.com/makinacorpus/django-mapentity/pull/48\n\nRelated https://github.com/makinacorpus/Geotrek/issues/1026\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.generic.edit import BaseDetailView\nfrom django.core.cache import get_cache\nfrom django.shortcuts import redirect\n\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat,\n JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import path_manager_required, same_structure_required\n\nfrom .models import Path, Trail\nfrom .forms import PathForm\nfrom .filters import PathFilter\nfrom . 
import graph as graph_lib\n\n\n@login_required\ndef last_list(request):\n last = request.session.get('last_list') # set in MapEntityList\n if not last:\n return redirect('core:path_list')\n return redirect(last)\n\nhome = last_list\n\n\nclass HttpSVGResponse(HttpResponse):\n content_type = 'image/svg+xml'\n def __init__(self, content='', **kwargs):\n kwargs['content_type'] = self.content_type\n super(HttpSVGResponse, self).__init__(content, **kwargs)\n\n\nclass ElevationChart(LastModifiedMixin, BaseDetailView):\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationChart, self).dispatch(*args, **kwargs)\n\n def render_to_response(self, context, **response_kwargs):\n return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),\n **response_kwargs)\n\n\nclass ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile from a path and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationProfile, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Put elevation profile into response context.\n \"\"\"\n obj = self.get_object()\n data = {}\n # Formatted as distance, elevation, [lng, lat]\n for step in obj.get_elevation_profile():\n formatted = step[0], step[3], step[1:3]\n data.setdefault('profile', []).append(formatted)\n return data\n\n\nclass ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile on an area and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationArea, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n obj = self.get_object()\n return obj.get_elevation_area()\n\n\nclass PathLayer(MapEntityLayer):\n model = Path\n properties = ['name']\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')\n filterform = PathFilter\n columns = ['id', 'name', 'networks', 'stake', 'trail']\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n pass\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n pass\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def can_edit(self):\n return self.request.user.is_superuser or \\\n (hasattr(self.request.user, 'profile') and \\\n self.request.user.profile.is_path_manager and \\\n self.get_object().same_structure(self.request.user))\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n self.get_object().prepare_elevation_chart(self.request)\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_list'))\n def dispatch(self, *args, **kwargs):\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathDelete, self).dispatch(*args, 
**kwargs)\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\ndef get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailDetail(MapEntityDetail):\n model = Trail\n\n def can_edit(self):\n return False\n\n\nclass TrailDocument(MapEntityDocument):\n model = Trail\n", "path": "geotrek/core/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.decorators.cache import never_cache as force_cache_validation\nfrom django.views.generic.edit import BaseDetailView\nfrom django.core.cache import get_cache\nfrom django.shortcuts import redirect\n\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat,\n JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import path_manager_required, same_structure_required\n\nfrom .models import Path, Trail\nfrom .forms import PathForm\nfrom .filters import PathFilter\nfrom . 
import graph as graph_lib\n\n\n@login_required\ndef last_list(request):\n last = request.session.get('last_list') # set in MapEntityList\n if not last:\n return redirect('core:path_list')\n return redirect(last)\n\nhome = last_list\n\n\nclass HttpSVGResponse(HttpResponse):\n content_type = 'image/svg+xml'\n def __init__(self, content='', **kwargs):\n kwargs['content_type'] = self.content_type\n super(HttpSVGResponse, self).__init__(content, **kwargs)\n\n\nclass ElevationChart(LastModifiedMixin, BaseDetailView):\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationChart, self).dispatch(*args, **kwargs)\n\n def render_to_response(self, context, **response_kwargs):\n return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),\n **response_kwargs)\n\n\nclass ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile from a path and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationProfile, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Put elevation profile into response context.\n \"\"\"\n obj = self.get_object()\n data = {}\n # Formatted as distance, elevation, [lng, lat]\n for step in obj.get_elevation_profile():\n formatted = step[0], step[3], step[1:3]\n data.setdefault('profile', []).append(formatted)\n return data\n\n\nclass ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile on an area and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationArea, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n obj = self.get_object()\n return obj.get_elevation_area()\n\n\nclass PathLayer(MapEntityLayer):\n model = Path\n properties = ['name']\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')\n filterform = PathFilter\n columns = ['id', 'name', 'networks', 'stake', 'trail']\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n pass\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n pass\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def can_edit(self):\n return self.request.user.is_superuser or \\\n (hasattr(self.request.user, 'profile') and \\\n self.request.user.profile.is_path_manager and \\\n self.get_object().same_structure(self.request.user))\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n self.get_object().prepare_elevation_chart(self.request)\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_list'))\n def dispatch(self, *args, **kwargs):\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathDelete, self).dispatch(*args, 
**kwargs)\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\n@force_cache_validation\ndef get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailDetail(MapEntityDetail):\n model = Trail\n\n def can_edit(self):\n return False\n\n\nclass TrailDocument(MapEntityDocument):\n model = Trail\n", "path": "geotrek/core/views.py"}]} | 1,992 | 171 |
gh_patches_debug_23222 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NullWriter has no attribute 'closed'
This is similar to issue #1883
It is triggered when using the "click" library in a PyInstaller --noconsole application.
A workaround is as follows:
```python
# fixup some problems from pyinstaller
if "NullWriter" in str(type(sys.stdout)):
sys.stdout.closed = sys.stderr.closed = False
```
I suggest adding a class attribute, closed=False to fix this.
You may want to add the "errors" and "newlines" attributes as well, see the python docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/loader/pyiboot01_bootstrap.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2022, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 #-- Start bootstrap process
13 # Only python built-in modules can be used.
14
15 import sys
16
17 import pyimod02_importers
18
19 # Extend Python import machinery by adding PEP302 importers to sys.meta_path.
20 pyimod02_importers.install()
21
22 #-- Bootstrap process is complete.
23 # We can use other python modules (e.g. os)
24
25 import os # noqa: E402
26
27 # Let other python modules know that the code is running in frozen mode.
28 if not hasattr(sys, 'frozen'):
29 sys.frozen = True
30
31 # sys._MEIPASS is now set in the bootloader. Hooray.
32
33 # Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.
34 # Let's do the same for Python 3.
35 sys.prefix = sys._MEIPASS
36 sys.exec_prefix = sys.prefix
37
38 # Python 3.3+ defines also sys.base_prefix. Let's set them too.
39 sys.base_prefix = sys.prefix
40 sys.base_exec_prefix = sys.exec_prefix
41
42 # Some packages behave differently when running inside virtual environment. E.g., IPython tries to append path
43 # VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.
44 VIRTENV = 'VIRTUAL_ENV'
45 if VIRTENV in os.environ:
46 # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not
47 # delete it from the environment.
48 os.environ[VIRTENV] = ''
49 del os.environ[VIRTENV]
50
51 # Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working
52 # directory is changed by the frozen application.
53 python_path = []
54 for pth in sys.path:
55 python_path.append(os.path.abspath(pth))
56 sys.path = python_path
57
58
59 # Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with
60 # "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout
61 # object is bound to an invalid file descriptor.
62 # Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a
63 # good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code
64 # with "pythonw" (and it is difficult to debug, anyway).
65 class NullWriter:
66 softspace = 0
67 encoding = 'UTF-8'
68
69 def write(*args):
70 pass
71
72 def flush(*args):
73 pass
74
75 # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.
76 def isatty(self):
77 return False
78
79
80 # sys.stdout/err is None in GUI mode on Windows.
81 if sys.stdout is None:
82 sys.stdout = NullWriter()
83 if sys.stderr is None:
84 sys.stderr = NullWriter()
85
86 # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all
87 # the encodings.
88 #
89 # It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:
90 # http://www.pyinstaller.org/ticket/651
91 #
92 # Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the
93 # order of running the code from the run-time hooks is not defined.
94 try:
95 import encodings # noqa: F401
96 except ImportError:
97 pass
98
99 # In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.
100 if sys.warnoptions:
101 import warnings # noqa: F401
102
103 # Install the hooks for ctypes
104 import pyimod03_ctypes # noqa: E402
105
106 pyimod03_ctypes.install()
107
108 # Install the hooks for pywin32 (Windows only)
109 if sys.platform.startswith('win'):
110 import pyimod04_pywin32
111 pyimod04_pywin32.install()
112
113 # Make .eggs and zipfiles available at runtime
114 d = "eggs"
115 d = os.path.join(sys._MEIPASS, d)
116 # Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe,
117 # even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653).
118 if os.path.isdir(d):
119 for fn in os.listdir(d):
120 sys.path.append(os.path.join(d, fn))
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py
--- a/PyInstaller/loader/pyiboot01_bootstrap.py
+++ b/PyInstaller/loader/pyiboot01_bootstrap.py
@@ -55,34 +55,6 @@
python_path.append(os.path.abspath(pth))
sys.path = python_path
-
-# Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with
-# "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout
-# object is bound to an invalid file descriptor.
-# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a
-# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code
-# with "pythonw" (and it is difficult to debug, anyway).
-class NullWriter:
- softspace = 0
- encoding = 'UTF-8'
-
- def write(*args):
- pass
-
- def flush(*args):
- pass
-
- # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.
- def isatty(self):
- return False
-
-
-# sys.stdout/err is None in GUI mode on Windows.
-if sys.stdout is None:
- sys.stdout = NullWriter()
-if sys.stderr is None:
- sys.stderr = NullWriter()
-
# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all
# the encodings.
#
| {"golden_diff": "diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py\n--- a/PyInstaller/loader/pyiboot01_bootstrap.py\n+++ b/PyInstaller/loader/pyiboot01_bootstrap.py\n@@ -55,34 +55,6 @@\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n \n-\n-# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n-# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n-# object is bound to an invalid file descriptor.\n-# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n-# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n-# with \"pythonw\" (and it is difficult to debug, anyway).\n-class NullWriter:\n- softspace = 0\n- encoding = 'UTF-8'\n-\n- def write(*args):\n- pass\n-\n- def flush(*args):\n- pass\n-\n- # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n- def isatty(self):\n- return False\n-\n-\n-# sys.stdout/err is None in GUI mode on Windows.\n-if sys.stdout is None:\n- sys.stdout = NullWriter()\n-if sys.stderr is None:\n- sys.stderr = NullWriter()\n-\n # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n # the encodings.\n #\n", "issue": "NullWriter has no attribute 'closed'\nThis is similar to issue #1883\r\nIt is triggered when using the \"click\" library in a PyInstaller --noconsole application.\r\n\r\nA workaround is as follows:\r\n```python\r\n# fixup somw problems from pyinstaller\r\nif \"NullWriter\" in str(type(sys.stdout)):\r\n sys.stdout.closed = sys.stderr.closed = False\r\n```\r\nI suggest adding a class attribute, closed=False to fix this.\r\nYou may want to add the \"errors\" and \"newlines\" attributes as well, see the python docs.\r\n\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n#-- Start bootstrap process\n# Only python built-in modules can be used.\n\nimport sys\n\nimport pyimod02_importers\n\n# Extend Python import machinery by adding PEP302 importers to sys.meta_path.\npyimod02_importers.install()\n\n#-- Bootstrap process is complete.\n# We can use other python modules (e.g. os)\n\nimport os # noqa: E402\n\n# Let other python modules know that the code is running in frozen mode.\nif not hasattr(sys, 'frozen'):\n sys.frozen = True\n\n# sys._MEIPASS is now set in the bootloader. Hooray.\n\n# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.\n# Let's do the same for Python 3.\nsys.prefix = sys._MEIPASS\nsys.exec_prefix = sys.prefix\n\n# Python 3.3+ defines also sys.base_prefix. Let's set them too.\nsys.base_prefix = sys.prefix\nsys.base_exec_prefix = sys.exec_prefix\n\n# Some packages behave differently when running inside virtual environment. 
E.g., IPython tries to append path\n# VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.\nVIRTENV = 'VIRTUAL_ENV'\nif VIRTENV in os.environ:\n # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not\n # delete it from the environment.\n os.environ[VIRTENV] = ''\n del os.environ[VIRTENV]\n\n# Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working\n# directory is changed by the frozen application.\npython_path = []\nfor pth in sys.path:\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n\n\n# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n# object is bound to an invalid file descriptor.\n# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n# with \"pythonw\" (and it is difficult to debug, anyway).\nclass NullWriter:\n softspace = 0\n encoding = 'UTF-8'\n\n def write(*args):\n pass\n\n def flush(*args):\n pass\n\n # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n def isatty(self):\n return False\n\n\n# sys.stdout/err is None in GUI mode on Windows.\nif sys.stdout is None:\n sys.stdout = NullWriter()\nif sys.stderr is None:\n sys.stderr = NullWriter()\n\n# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n# the encodings.\n#\n# It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:\n# http://www.pyinstaller.org/ticket/651\n#\n# Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the\n# order of running the code from the run-time hooks is not defined.\ntry:\n import encodings # noqa: F401\nexcept ImportError:\n pass\n\n# In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.\nif sys.warnoptions:\n import warnings # noqa: F401\n\n# Install the hooks for ctypes\nimport pyimod03_ctypes # noqa: E402\n\npyimod03_ctypes.install()\n\n# Install the hooks for pywin32 (Windows only)\nif sys.platform.startswith('win'):\n import pyimod04_pywin32\n pyimod04_pywin32.install()\n\n# Make .eggs and zipfiles available at runtime\nd = \"eggs\"\nd = os.path.join(sys._MEIPASS, d)\n# Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe,\n# even if no eggs were found when packaging the program. 
(Which may be a use-case, see issue #653).\nif os.path.isdir(d):\n for fn in os.listdir(d):\n sys.path.append(os.path.join(d, fn))\n", "path": "PyInstaller/loader/pyiboot01_bootstrap.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n#-- Start bootstrap process\n# Only python built-in modules can be used.\n\nimport sys\n\nimport pyimod02_importers\n\n# Extend Python import machinery by adding PEP302 importers to sys.meta_path.\npyimod02_importers.install()\n\n#-- Bootstrap process is complete.\n# We can use other python modules (e.g. os)\n\nimport os # noqa: E402\n\n# Let other python modules know that the code is running in frozen mode.\nif not hasattr(sys, 'frozen'):\n sys.frozen = True\n\n# sys._MEIPASS is now set in the bootloader. Hooray.\n\n# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.\n# Let's do the same for Python 3.\nsys.prefix = sys._MEIPASS\nsys.exec_prefix = sys.prefix\n\n# Python 3.3+ defines also sys.base_prefix. Let's set them too.\nsys.base_prefix = sys.prefix\nsys.base_exec_prefix = sys.exec_prefix\n\n# Some packages behave differently when running inside virtual environment. E.g., IPython tries to append path\n# VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.\nVIRTENV = 'VIRTUAL_ENV'\nif VIRTENV in os.environ:\n # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not\n # delete it from the environment.\n os.environ[VIRTENV] = ''\n del os.environ[VIRTENV]\n\n# Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working\n# directory is changed by the frozen application.\npython_path = []\nfor pth in sys.path:\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n\n# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n# the encodings.\n#\n# It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:\n# http://www.pyinstaller.org/ticket/651\n#\n# Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the\n# order of running the code from the run-time hooks is not defined.\ntry:\n import encodings # noqa: F401\nexcept ImportError:\n pass\n\n# In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.\nif sys.warnoptions:\n import warnings # noqa: F401\n\n# Install the hooks for ctypes\nimport pyimod03_ctypes # noqa: E402\n\npyimod03_ctypes.install()\n\n# Install the hooks for pywin32 (Windows only)\nif sys.platform.startswith('win'):\n import pyimod04_pywin32\n pyimod04_pywin32.install()\n\n# Make .eggs and zipfiles available at runtime\nd = \"eggs\"\nd = os.path.join(sys._MEIPASS, d)\n# Test if the 'eggs' directory exists. 
This allows us to opportunistically include this script into the packaged exe,\n# even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653).\nif os.path.isdir(d):\n for fn in os.listdir(d):\n sys.path.append(os.path.join(d, fn))\n", "path": "PyInstaller/loader/pyiboot01_bootstrap.py"}]} | 1,774 | 402 |
gh_patches_debug_39414 | rasdani/github-patches | git_diff | buildbot__buildbot-4467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SecretInVault secret provider integration tests no longer work
The test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.
See https://travis-ci.org/buildbot/buildbot/jobs/464401540.
Looks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/secrets/providers/vault.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15 """
16 vault based providers
17 """
18
19 from __future__ import absolute_import
20 from __future__ import print_function
21
22 from twisted.internet import defer
23
24 from buildbot import config
25 from buildbot.secrets.providers.base import SecretProviderBase
26 from buildbot.util import httpclientservice
27
28
29 class HashiCorpVaultSecretProvider(SecretProviderBase):
30 """
31 basic provider where each secret is stored in Vault
32 """
33
34 name = 'SecretInVault'
35
36 def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
37 if not isinstance(vaultServer, str):
38 config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
39 if not isinstance(vaultToken, str):
40 config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
41
42 @defer.inlineCallbacks
43 def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
44 if secretsmount is None:
45 self.secretsmount = "secret"
46 else:
47 self.secretsmount = secretsmount
48 self.vaultServer = vaultServer
49 self.vaultToken = vaultToken
50 if vaultServer.endswith('/'):
51 vaultServer = vaultServer[:-1]
52 self._http = yield httpclientservice.HTTPClientService.getService(
53 self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})
54
55 @defer.inlineCallbacks
56 def get(self, entry):
57 """
58 get the value from vault secret backend
59 """
60 path = self.secretsmount + '/' + entry
61 proj = yield self._http.get('/v1/{0}'.format(path))
62 code = yield proj.code
63 if code != 200:
64 raise KeyError("The key %s does not exist in Vault provider: request"
65 " return code:%d." % (entry, code))
66 json = yield proj.json()
67 defer.returnValue(json.get(u'data', {}).get('value'))
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py
--- a/master/buildbot/secrets/providers/vault.py
+++ b/master/buildbot/secrets/providers/vault.py
@@ -28,25 +28,30 @@
class HashiCorpVaultSecretProvider(SecretProviderBase):
"""
- basic provider where each secret is stored in Vault
+ basic provider where each secret is stored in Vault KV secret engine
"""
name = 'SecretInVault'
- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if not isinstance(vaultServer, str):
config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
if not isinstance(vaultToken, str):
config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
+ if apiVersion not in [1, 2]:
+ config.error("apiVersion %s is not supported" % apiVersion)
@defer.inlineCallbacks
- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if secretsmount is None:
self.secretsmount = "secret"
else:
self.secretsmount = secretsmount
self.vaultServer = vaultServer
self.vaultToken = vaultToken
+ self.apiVersion = apiVersion
if vaultServer.endswith('/'):
vaultServer = vaultServer[:-1]
self._http = yield httpclientservice.HTTPClientService.getService(
@@ -57,11 +62,23 @@
"""
get the value from vault secret backend
"""
- path = self.secretsmount + '/' + entry
+ if self.apiVersion == 1:
+ path = self.secretsmount + '/' + entry
+ else:
+ path = self.secretsmount + '/data/' + entry
+
+ # note that the HTTP path contains v1 for both versions of the key-value
+ # secret engine. Different versions of the key-value engine are
+ # effectively separate secret engines in vault, with the same base HTTP
+ # API, but with different paths within it.
proj = yield self._http.get('/v1/{0}'.format(path))
code = yield proj.code
if code != 200:
raise KeyError("The key %s does not exist in Vault provider: request"
" return code:%d." % (entry, code))
json = yield proj.json()
- defer.returnValue(json.get(u'data', {}).get('value'))
+ if self.apiVersion == 1:
+ ret = json.get(u'data', {}).get('value')
+ else:
+ ret = json.get(u'data', {}).get(u'data', {}).get('value')
+ defer.returnValue(ret)
| {"golden_diff": "diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py\n--- a/master/buildbot/secrets/providers/vault.py\n+++ b/master/buildbot/secrets/providers/vault.py\n@@ -28,25 +28,30 @@\n \n class HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n- basic provider where each secret is stored in Vault\n+ basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n \n name = 'SecretInVault'\n \n- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n+ if apiVersion not in [1, 2]:\n+ config.error(\"apiVersion %s is not supported\" % apiVersion)\n \n @defer.inlineCallbacks\n- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n+ self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n@@ -57,11 +62,23 @@\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n- path = self.secretsmount + '/' + entry\n+ if self.apiVersion == 1:\n+ path = self.secretsmount + '/' + entry\n+ else:\n+ path = self.secretsmount + '/data/' + entry\n+\n+ # note that the HTTP path contains v1 for both versions of the key-value\n+ # secret engine. Different versions of the key-value engine are\n+ # effectively separate secret engines in vault, with the same base HTTP\n+ # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n- defer.returnValue(json.get(u'data', {}).get('value'))\n+ if self.apiVersion == 1:\n+ ret = json.get(u'data', {}).get('value')\n+ else:\n+ ret = json.get(u'data', {}).get(u'data', {}).get('value')\n+ defer.returnValue(ret)\n", "issue": "SecretInVault secret provider integration tests no longer work\nThe test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.\r\n\r\nSee https://travis-ci.org/buildbot/buildbot/jobs/464401540.\r\n\r\nLooks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n path = self.secretsmount + '/' + entry\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n defer.returnValue(json.get(u'data', {}).get('value'))\n", "path": "master/buildbot/secrets/providers/vault.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n if apiVersion not in [1, 2]:\n config.error(\"apiVersion %s is not supported\" % apiVersion)\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n if self.apiVersion == 1:\n path = self.secretsmount + '/' + entry\n else:\n path = self.secretsmount + '/data/' + entry\n\n # note that the HTTP path contains v1 for both versions of the key-value\n # secret engine. Different versions of the key-value engine are\n # effectively separate secret engines in vault, with the same base HTTP\n # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n if self.apiVersion == 1:\n ret = json.get(u'data', {}).get('value')\n else:\n ret = json.get(u'data', {}).get(u'data', {}).get('value')\n defer.returnValue(ret)\n", "path": "master/buildbot/secrets/providers/vault.py"}]} | 1,072 | 684 |
gh_patches_debug_30219 | rasdani/github-patches | git_diff | ansible__awx-13455 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CyberArk Conjur lookup plugin does not work with open source Conjur version
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
The CyberArk Conjur Secrets Manager Lookup plugin no longer supports lookups against Conjur OSS, most likely because the open source Conjur API endpoint does not include the `/api/` path segment that was recently added by this change: https://github.com/ansible/awx/pull/13121
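For illustration only, the two authentication URL shapes involved look roughly like this (hypothetical sketch; the exact base path depends on how Conjur is deployed, and `build_authn_url` is not a real AWX helper):

```python
from urllib.parse import urljoin, quote

def build_authn_url(base_url, account, username, use_api_prefix=False):
    # Conjur OSS / self-hosted:  <base>/authn/<account>/<login>/authenticate
    # /api-prefixed deployments: <base>/api/authn/<account>/<login>/authenticate
    parts = ['authn', quote(account, safe=''), quote(username, safe=''), 'authenticate']
    if use_api_prefix:
        parts.insert(0, 'api')
    return urljoin(base_url, '/'.join(parts))
```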
### AWX version
2.4
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is
```
Version 1.19.1-3398
API Version 5.3.0
```
2. In AWX, create a CyberArk Conjur Secrets Manager Lookup credential. Provide the Conjur URL, user, and token.
3. Click Test and provide a path.
### Expected results
Connection to Conjur should be established. HTTP 200 response.
### Actual results
401 error returned.
```
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 504, in run
args = self.build_args(self.instance, private_data_dir, passwords)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 937, in build_args
ssh_username = creds.get_input('username', default='')
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 275, in get_input
return self._get_dynamic_input(field_name)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 309, in _get_dynamic_input
return input_source.get_input_value()
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 1250, in get_input_value
return backend(**backend_kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py", line 72, in conjur_backend
raise_for_status(resp)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py", line 12, in raise_for_status
resp.raise_for_status()
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py", line 1021, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate
```
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/credential_plugins/conjur.py`
Content:
```
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import urljoin, quote
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8
9 conjur_inputs = {
10 'fields': [
11 {
12 'id': 'url',
13 'label': _('Conjur URL'),
14 'type': 'string',
15 'format': 'url',
16 },
17 {
18 'id': 'api_key',
19 'label': _('API Key'),
20 'type': 'string',
21 'secret': True,
22 },
23 {
24 'id': 'account',
25 'label': _('Account'),
26 'type': 'string',
27 },
28 {
29 'id': 'username',
30 'label': _('Username'),
31 'type': 'string',
32 },
33 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},
34 ],
35 'metadata': [
36 {
37 'id': 'secret_path',
38 'label': _('Secret Identifier'),
39 'type': 'string',
40 'help_text': _('The identifier for the secret e.g., /some/identifier'),
41 },
42 {
43 'id': 'secret_version',
44 'label': _('Secret Version'),
45 'type': 'string',
46 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
47 },
48 ],
49 'required': ['url', 'api_key', 'account', 'username'],
50 }
51
52
53 def conjur_backend(**kwargs):
54 url = kwargs['url']
55 api_key = kwargs['api_key']
56 account = quote(kwargs['account'], safe='')
57 username = quote(kwargs['username'], safe='')
58 secret_path = quote(kwargs['secret_path'], safe='')
59 version = kwargs.get('secret_version')
60 cacert = kwargs.get('cacert', None)
61
62 auth_kwargs = {
63 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
64 'data': api_key,
65 'allow_redirects': False,
66 }
67
68 with CertFiles(cacert) as cert:
69 # https://www.conjur.org/api.html#authentication-authenticate-post
70 auth_kwargs['verify'] = cert
71 resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
72 raise_for_status(resp)
73 token = resp.content.decode('utf-8')
74
75 lookup_kwargs = {
76 'headers': {'Authorization': 'Token token="{}"'.format(token)},
77 'allow_redirects': False,
78 }
79
80 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
81 path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
82 if version:
83 ver = "version={}".format(version)
84 path = '?'.join([path, ver])
85
86 with CertFiles(cacert) as cert:
87 lookup_kwargs['verify'] = cert
88 resp = requests.get(path, timeout=30, **lookup_kwargs)
89 raise_for_status(resp)
90 return resp.text
91
92
93 conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py
--- a/awx/main/credential_plugins/conjur.py
+++ b/awx/main/credential_plugins/conjur.py
@@ -68,7 +68,10 @@
with CertFiles(cacert) as cert:
# https://www.conjur.org/api.html#authentication-authenticate-post
auth_kwargs['verify'] = cert
- resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
+ try:
+ resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
+ except requests.exceptions.ConnectionError:
+ resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp)
token = resp.content.decode('utf-8')
@@ -78,14 +81,19 @@
}
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
- path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
+ path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
+ path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
if version:
ver = "version={}".format(version)
path = '?'.join([path, ver])
+ path_conjurcloud = '?'.join([path_conjurcloud, ver])
with CertFiles(cacert) as cert:
lookup_kwargs['verify'] = cert
- resp = requests.get(path, timeout=30, **lookup_kwargs)
+ try:
+ resp = requests.get(path, timeout=30, **lookup_kwargs)
+ except requests.exceptions.ConnectionError:
+ resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
raise_for_status(resp)
return resp.text
| {"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -68,7 +68,10 @@\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n- resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n+ try:\n+ resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n \n@@ -78,14 +81,19 @@\n }\n \n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n- path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n+ path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n+ path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n+ path_conjurcloud = '?'.join([path_conjurcloud, ver])\n \n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n- resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ try:\n+ resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n", "issue": "CyberArk Conjur lookup plugin does not work with open source Conjur version\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nThe CyberArk Conjur Secrets Manager Lookup plugin no longer supports the lookup against the Conjur OSS. Most likely because the opensource conjure version API endpoint does not have the `/api/` portion recently added by this change: https://github.com/ansible/awx/pull/13121\r\n\r\n### AWX version\r\n\r\n2.4\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\ndocker development environment\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is\r\n```\r\n Version 1.19.1-3398\r\n API Version 5.3.0 \r\n```\r\n2. Under the AWX, create the CyberArk Conjur Secrets Manager Lookup credential. Provide Conjure URL, user, and token. \r\n3. Click Test and provide a path. \r\n\r\n\r\n### Expected results\r\n\r\nConnection to Conjur should be established. HTTP 200 response. \r\n\r\n### Actual results\r\n\r\n401 error returned. 
\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 504, in run\r\n args = self.build_args(self.instance, private_data_dir, passwords)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 937, in build_args\r\n ssh_username = creds.get_input('username', default='')\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 275, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 309, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 1250, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py\", line 72, in conjur_backend\r\n raise_for_status(resp)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py\", line 12, in raise_for_status\r\n resp.raise_for_status()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py\", line 1021, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\nrequests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate\r\n```\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': 
{'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}], "after_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n path_conjurcloud = '?'.join([path_conjurcloud, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = 
CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]} | 2,012 | 479 |
gh_patches_debug_19765 | rasdani/github-patches | git_diff | bokeh__bokeh-10170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `TileRenderer` ignores the `visible` property
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.1.0rc1
#### Description of expected behavior and the observed behavior
`TileRenderer` should take the `visible` property into account, both when it is passed to its constructor and when it is changed at runtime via the Bokeh server.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
from bokeh.io import show
from bokeh.plotting import figure
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(get_provider(CARTODBPOSITRON), visible=False)
show(p)
```
The root cause is that `TileRenderer` simply doesn't check `visible` at all. It seems that every renderer checks this property individually; maybe it should be checked at a higher level instead?
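One way to act on the "higher level" idea would be for a shared renderer base class to check `visible` once before delegating to each renderer's drawing code. In the meantime, a user-level workaround along these lines may help (untested sketch, reusing `p` and the provider from the snippet above; it assumes `TileRenderer` exposes an `alpha` property):

```python
# Hypothetical workaround: hide the tiles by making them fully transparent
# instead of relying on visible=False.
tile_renderer = p.add_tile(get_provider(CARTODBPOSITRON))
tile_renderer.alpha = 0.0  # tiles are still requested, just not shown
```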
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/models/file/latex_extension.py`
Content:
```
1 """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
2 """
3 import numpy as np
4 from scipy.special import jv
5
6 from bokeh.models import Label
7 from bokeh.palettes import Spectral4
8 from bokeh.plotting import figure, output_file, show
9 from bokeh.util.compiler import TypeScript
10
11 output_file('latex_extension.html')
12
13 class LatexLabel(Label):
14 """A subclass of `Label` with all of the same class attributes except
15 canvas mode isn't supported and DOM manipulation happens in the TypeScript
16 superclass implementation that requires setting `render_mode='css'`).
17
18 Only the render method of LabelView is overwritten to perform the
19 text -> latex (via katex) conversion
20 """
21 __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js"]
22 __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css"]
23 __implementation__ = TypeScript("""
24 import {Label, LabelView} from "models/annotations/label"
25
26 declare namespace katex {
27 function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void
28 }
29
30 export class LatexLabelView extends LabelView {
31 model: LatexLabel
32
33 render(): void {
34 // Here because AngleSpec does units tranform and label doesn't support specs
35 let angle: number
36 switch (this.model.angle_units) {
37 case "rad": {
38 angle = -1 * this.model.angle
39 break
40 }
41 case "deg": {
42 angle = -1 * this.model.angle * Math.PI/180.0
43 break
44 }
45 default:
46 throw new Error("unreachable")
47 }
48
49 const panel = this.panel || this.plot_view.frame
50
51 const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
52 const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
53
54 const {x, y} = this.model
55 let sx = this.model.x_units == "data" ? xscale.compute(x) : panel.xview.compute(x)
56 let sy = this.model.y_units == "data" ? yscale.compute(y) : panel.yview.compute(y)
57
58 sx += this.model.x_offset
59 sy -= this.model.y_offset
60
61 this._css_text(this.layer.ctx, "", sx, sy, angle)
62 katex.render(this.model.text, this.el, {displayMode: true})
63 }
64 }
65
66 export class LatexLabel extends Label {
67 static init_LatexLabel(): void {
68 this.prototype.default_view = LatexLabelView
69 }
70 }
71 """)
72
73 p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
74 background_fill_color="#fafafa")
75 p.x_range.range_padding = 0
76
77 x = np.arange(0.0, 20.0, 0.02)
78
79 for i, n in enumerate([0, 1, 4, 7]):
80 p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label="𝜈=%d" % n)
81
82
83 text = (r"\text{Bessel Functions of the First Kind: }" +
84 r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
85 r"\left(\frac{x}{2}\right)^{2m+\nu}")
86 latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
87 render_mode='css', text_font_size='11px',
88 background_fill_color="white", border_line_color="lightgrey")
89
90 p.add_layout(latex)
91
92 show(p)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py
--- a/examples/models/file/latex_extension.py
+++ b/examples/models/file/latex_extension.py
@@ -30,7 +30,7 @@
export class LatexLabelView extends LabelView {
model: LatexLabel
- render(): void {
+ protected _render(): void {
// Here because AngleSpec does units tranform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
@@ -59,13 +59,17 @@
sy -= this.model.y_offset
this._css_text(this.layer.ctx, "", sx, sy, angle)
- katex.render(this.model.text, this.el, {displayMode: true})
+ katex.render(this.model.text, this.el!, {displayMode: true})
}
}
export class LatexLabel extends Label {
static init_LatexLabel(): void {
this.prototype.default_view = LatexLabelView
+
+ this.override({
+ render_mode: "css",
+ })
}
}
""")
| {"golden_diff": "diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py\n--- a/examples/models/file/latex_extension.py\n+++ b/examples/models/file/latex_extension.py\n@@ -30,7 +30,7 @@\n export class LatexLabelView extends LabelView {\n model: LatexLabel\n \n- render(): void {\n+ protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n@@ -59,13 +59,17 @@\n sy -= this.model.y_offset\n \n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n- katex.render(this.model.text, this.el, {displayMode: true})\n+ katex.render(this.model.text, this.el!, {displayMode: true})\n }\n }\n \n export class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n+\n+ this.override({\n+ render_mode: \"css\",\n+ })\n }\n }\n \"\"\")\n", "issue": "[BUG] `TileRenderer` ignores the `visible` property\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.1.0rc1\r\n\r\n#### Description of expected behavior and the observed behavior\r\n`TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n```python\r\nfrom bokeh.io import show\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.tile_providers import CARTODBPOSITRON, get_provider\r\n\r\np = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),\r\n x_axis_type=\"mercator\", y_axis_type=\"mercator\")\r\n\r\np.add_tile(get_provider(CARTODBPOSITRON), visible=False)\r\n\r\nshow(p)\r\n```\r\nThe root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. 
Maybe it should be checked at a higher level?\n", "before_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? 
yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}], "after_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? 
yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el!, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n\n this.override({\n render_mode: \"css\",\n })\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}]} | 1,544 | 244 |
gh_patches_debug_30984 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Directories of exploded archives not recognized as cluster
Directories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.
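A minimal sketch of one possible fix direction (assumption: only the top level of the extracted directory should be inspected for compressed members; the suffix list below is made up, the real one is `insights.core.archives.COMPRESSION_TYPES`):

```python
import os

COMPRESSION_TYPES = (".tar", ".tar.gz", ".tgz", ".zip", ".bz2", ".xz")  # assumed values

def looks_like_cluster(path):
    # Check only the immediate children; recursing into subdirectories is what
    # lets an unrelated marker file win and pick the wrong context.
    return any(name.endswith(COMPRESSION_TYPES) for name in os.listdir(path))
```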
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/hydration.py`
Content:
```
1 import logging
2 import os
3 from itertools import product
4
5 from insights.core import archives
6 from insights.core.context import (ClusterArchiveContext,
7 JDRContext,
8 HostArchiveContext,
9 SosArchiveContext,
10 SerializedArchiveContext)
11
12 log = logging.getLogger(__name__)
13
14
15 def get_all_files(path):
16 all_files = []
17 for f in archives.get_all_files(path):
18 if os.path.isfile(f) and not os.path.islink(f):
19 all_files.append(f)
20 return all_files
21
22
23 def identify(files):
24 markers = {"insights_archive.txt": SerializedArchiveContext,
25 "insights_commands": HostArchiveContext,
26 "sos_commands": SosArchiveContext,
27 "JBOSS_HOME": JDRContext}
28
29 for f, m in product(files, markers):
30 if m in f:
31 i = f.find(m)
32 common_path = os.path.dirname(f[:i])
33 ctx = markers[m]
34 return common_path, ctx
35
36 common_path = os.path.dirname(os.path.commonprefix(files))
37 if not common_path:
38 raise archives.InvalidArchive("Unable to determine common path")
39
40 if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
41 return common_path, ClusterArchiveContext
42
43 return common_path, HostArchiveContext
44
45
46 def create_context(path, context=None):
47 all_files = get_all_files(path)
48 if not all_files:
49 raise archives.InvalidArchive("No files in archive")
50
51 common_path, ctx = identify(all_files)
52 context = context or ctx
53 return context(common_path, all_files=all_files)
54
```
Path: `insights/core/cluster.py`
Content:
```
1 #!/usr/bin/env python
2 import itertools
3 import pandas as pd
4 from collections import defaultdict
5
6 from ansible.parsing.dataloader import DataLoader
7 from ansible.inventory.manager import InventoryManager
8
9 from insights.core import dr, plugins
10 from insights.core.archives import extract
11 from insights.core.hydration import create_context
12 from insights.specs import Specs
13
14
15 ID_GENERATOR = itertools.count()
16
17
18 class ClusterMeta(dict):
19 def __init__(self, num_members, kwargs):
20 self.num_members = num_members
21 self.update(**kwargs)
22
23
24 @plugins.combiner(optional=[Specs.machine_id, Specs.hostname])
25 def machine_id(mid, hn):
26 ds = mid or hn
27 if ds:
28 return ds.content[0].strip()
29 return str(next(ID_GENERATOR))
30
31
32 def parse_inventory(path):
33 inventory = InventoryManager(loader=DataLoader(), sources=path)
34 return inventory.get_groups_dict()
35
36
37 def attach_machine_id(result, mid):
38 key = "machine_id"
39 if isinstance(result, list):
40 for r in result:
41 r[key] = mid
42 else:
43 result[key] = mid
44 return result
45
46
47 def process_archives(archives):
48 for archive in archives:
49 with extract(archive) as ex:
50 ctx = create_context(ex.tmp_dir)
51 broker = dr.Broker()
52 broker[ctx.__class__] = ctx
53 yield dr.run(broker=broker)
54
55
56 def extract_facts(brokers):
57 results = defaultdict(list)
58 for b in brokers:
59 mid = b[machine_id]
60 for k, v in b.get_by_type(plugins.fact).items():
61 r = attach_machine_id(v, mid)
62 if isinstance(r, list):
63 results[k].extend(r)
64 else:
65 results[k].append(r)
66 return results
67
68
69 def process_facts(facts, meta, broker):
70 broker[ClusterMeta] = meta
71 for k, v in facts.items():
72 broker[k] = pd.DataFrame(v)
73 return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)
74
75
76 def process_cluster(archives, broker, inventory=None):
77 inventory = parse_inventory(inventory) if inventory else {}
78
79 brokers = process_archives(archives)
80 facts = extract_facts(brokers)
81 meta = ClusterMeta(len(archives), inventory)
82
83 return process_facts(facts, meta, broker)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/core/cluster.py b/insights/core/cluster.py
--- a/insights/core/cluster.py
+++ b/insights/core/cluster.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
import itertools
-import pandas as pd
+import os
from collections import defaultdict
+import pandas as pd
+
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
@@ -46,8 +48,14 @@
def process_archives(archives):
for archive in archives:
- with extract(archive) as ex:
- ctx = create_context(ex.tmp_dir)
+ if os.path.isfile(archive):
+ with extract(archive) as ex:
+ ctx = create_context(ex.tmp_dir)
+ broker = dr.Broker()
+ broker[ctx.__class__] = ctx
+ yield dr.run(broker=broker)
+ else:
+ ctx = create_context(archive)
broker = dr.Broker()
broker[ctx.__class__] = ctx
yield dr.run(broker=broker)
diff --git a/insights/core/hydration.py b/insights/core/hydration.py
--- a/insights/core/hydration.py
+++ b/insights/core/hydration.py
@@ -37,13 +37,15 @@
if not common_path:
raise archives.InvalidArchive("Unable to determine common path")
- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
- return common_path, ClusterArchiveContext
-
return common_path, HostArchiveContext
def create_context(path, context=None):
+ top = os.listdir(path)
+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]
+ if arc:
+ return ClusterArchiveContext(path, all_files=arc)
+
all_files = get_all_files(path)
if not all_files:
raise archives.InvalidArchive("No files in archive")
| {"golden_diff": "diff --git a/insights/core/cluster.py b/insights/core/cluster.py\n--- a/insights/core/cluster.py\n+++ b/insights/core/cluster.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n import itertools\n-import pandas as pd\n+import os\n from collections import defaultdict\n \n+import pandas as pd\n+\n from ansible.parsing.dataloader import DataLoader\n from ansible.inventory.manager import InventoryManager\n \n@@ -46,8 +48,14 @@\n \n def process_archives(archives):\n for archive in archives:\n- with extract(archive) as ex:\n- ctx = create_context(ex.tmp_dir)\n+ if os.path.isfile(archive):\n+ with extract(archive) as ex:\n+ ctx = create_context(ex.tmp_dir)\n+ broker = dr.Broker()\n+ broker[ctx.__class__] = ctx\n+ yield dr.run(broker=broker)\n+ else:\n+ ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\ndiff --git a/insights/core/hydration.py b/insights/core/hydration.py\n--- a/insights/core/hydration.py\n+++ b/insights/core/hydration.py\n@@ -37,13 +37,15 @@\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n \n- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n- return common_path, ClusterArchiveContext\n-\n return common_path, HostArchiveContext\n \n \n def create_context(path, context=None):\n+ top = os.listdir(path)\n+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n+ if arc:\n+ return ClusterArchiveContext(path, all_files=arc)\n+\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n", "issue": "Directories of exploded archives not recognized as cluster\nDirectories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.\n", "before_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n return common_path, ClusterArchiveContext\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env python\nimport itertools\nimport pandas as pd\nfrom collections import 
defaultdict\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}], "after_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n top = os.listdir(path)\n arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n if arc:\n return ClusterArchiveContext(path, all_files=arc)\n\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env 
python\nimport itertools\nimport os\nfrom collections import defaultdict\n\nimport pandas as pd\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n if os.path.isfile(archive):\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n else:\n ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}]} | 1,440 | 449 |
gh_patches_debug_20507 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update Ansible due to CVE-2019-14864
## Description
[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged), neither of which we are using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.
@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development).
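A rough sketch of what a version-range guard could look like after the upgrade (the bounds here are placeholders, not the project's actual pins):

```python
import ansible

MINIMUM = (2, 8, 0)   # assumed lower bound
UPPER = (2, 10, 0)    # assumed exclusive upper bound

current = tuple(int(part) for part in ansible.__version__.split('.')[:3])
if not (MINIMUM <= current < UPPER):
    raise SystemExit("Unsupported Ansible version: {}".format(ansible.__version__))
```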
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install_files/ansible-base/callback_plugins/ansible_version_check.py`
Content:
```
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.7.13' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -19,13 +19,18 @@
class CallbackModule(CallbackBase):
def __init__(self):
- # Can't use `on_X` because this isn't forwards compatible
- # with Ansible 2.0+
- required_version = '2.7.13' # Keep synchronized with requirements files
- if not ansible.__version__.startswith(required_version):
+ # The acceptable version range needs to be synchronized with
+ # requirements files.
+ viable_start = [2, 9, 7]
+ viable_end = [2, 10, 0]
+ ansible_version = [int(v) for v in ansible.__version__.split('.')]
+ if not (viable_start <= ansible_version < viable_end):
print_red_bold(
- "SecureDrop restriction: only Ansible {version}.*"
- "is supported."
- .format(version=required_version)
+ "SecureDrop restriction: Ansible version must be at least {viable_start} "
+ "and less than {viable_end}."
+ .format(
+ viable_start='.'.join(str(v) for v in viable_start),
+ viable_end='.'.join(str(v) for v in viable_end),
+ )
)
sys.exit(1)
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,13 +19,18 @@\n \n class CallbackModule(CallbackBase):\n def __init__(self):\n- # Can't use `on_X` because this isn't forwards compatible\n- # with Ansible 2.0+\n- required_version = '2.7.13' # Keep synchronized with requirements files\n- if not ansible.__version__.startswith(required_version):\n+ # The acceptable version range needs to be synchronized with\n+ # requirements files.\n+ viable_start = [2, 9, 7]\n+ viable_end = [2, 10, 0]\n+ ansible_version = [int(v) for v in ansible.__version__.split('.')]\n+ if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n- \"SecureDrop restriction: only Ansible {version}.*\"\n- \"is supported.\"\n- .format(version=required_version)\n+ \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n+ \"and less than {viable_end}.\"\n+ .format(\n+ viable_start='.'.join(str(v) for v in viable_start),\n+ viable_end='.'.join(str(v) for v in viable_end),\n+ )\n )\n sys.exit(1)\n", "issue": "update Ansible due to CVE-2019-14864 \n## Description\r\n\r\n[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.\r\n\r\n@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development). 
\n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.7.13' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}], "after_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # The acceptable version range needs to be synchronized with\n # requirements files.\n viable_start = [2, 9, 7]\n viable_end = [2, 10, 0]\n ansible_version = [int(v) for v in ansible.__version__.split('.')]\n if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n \"and less than {viable_end}.\"\n .format(\n viable_start='.'.join(str(v) for v in viable_start),\n viable_end='.'.join(str(v) for v in viable_end),\n )\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 716 | 347 |
gh_patches_debug_22997 | rasdani/github-patches | git_diff | liqd__a4-opin-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong user in project
When I get an invitation to a project via email and I am logged in with a different user and click on the link in the email, the second user is added to the project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/memberships/views.py`
Content:
```
1 from django.http import Http404
2 from django.shortcuts import redirect
3 from django.views import generic
4 from rules.compat import access_mixins as mixin
5
6 from adhocracy4.projects import models as prj_models
7 from adhocracy4.projects import views as prj_views
8
9 from . import forms, models
10
11
12 class RequestsProjectDetailView(prj_views.ProjectDetailView):
13
14 def handle_no_permission(self):
15 """
16 Check if user clould join
17 """
18 user = self.request.user
19 is_member = user.is_authenticated() and self.project.has_member(user)
20
21 if is_member:
22 return super().handle_no_permission()
23 else:
24 return self.handle_no_membership()
25
26 def handle_no_membership(self):
27 membership_impossible = (
28 not self.request.user.is_authenticated()
29 or self.project.is_draft
30 or self.project.has_member(self.request.user)
31 )
32
33 if membership_impossible:
34 return super().handle_no_permission()
35 else:
36 return redirect('memberships-request',
37 project_slug=self.project.slug)
38
39
40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView):
41 model = models.Invite
42 form_class = forms.InviteForm
43 slug_field = 'token'
44 slug_url_kwarg = 'invite_token'
45
46 def form_valid(self, form):
47 if form.is_accepted():
48 form.instance.accept(self.request.user)
49 return redirect(form.instance.project.get_absolute_url())
50 else:
51 form.instance.reject()
52 return redirect('/')
53
54
55 class RequestView(mixin.LoginRequiredMixin, generic.DetailView):
56 """
57 Displays membership request if it exists or allows to create one.
58 """
59 model = models.Request
60 slug_field = 'project__slug'
61 slug_url_kwarg = 'project_slug'
62 context_object_name = 'join_request'
63
64 def get_queryset(self):
65 return self.model.objects.filter(creator=self.request.user)
66
67 def get(self, request, *args, **kwargs):
68 if self.project.has_member(request.user):
69 return redirect(self.project.get_absolute_url())
70 else:
71 return super().get(request, *args, **kwargs)
72
73 def post(self, request, *args, **kwargs):
74 user = request.user
75 project = self.project
76 models.Request.objects.request_membership(project, user)
77 return redirect(self.request.path)
78
79 def get_object(self, queryset=None):
80 try:
81 return super().get_object(queryset)
82 except Http404:
83 return None
84
85 @property
86 def project(self):
87 project_slug = self.kwargs[self.slug_url_kwarg]
88 return prj_models.Project.objects.get(slug=project_slug)
89
```
Path: `euth/memberships/forms.py`
Content:
```
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from . import models
5
6
7 class InviteForm(forms.ModelForm):
8 accept = forms.CharField(required=False)
9 reject = forms.CharField(required=False)
10
11 class Meta:
12 model = models.Invite
13 fields = ['accept', 'reject']
14
15 def clean(self):
16 data = self.data
17 if 'accept' not in data and 'reject' not in data:
18 raise ValidationError('Reject or accept')
19 return data
20
21 def is_accepted(self):
22 data = self.data
23 return 'accept' in data and 'reject' not in data
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py
--- a/euth/memberships/forms.py
+++ b/euth/memberships/forms.py
@@ -12,10 +12,17 @@
model = models.Invite
fields = ['accept', 'reject']
+ def __init__(self, user=None, **kwargs):
+ super().__init__(**kwargs)
+ self.user = user
+
def clean(self):
data = self.data
if 'accept' not in data and 'reject' not in data:
raise ValidationError('Reject or accept')
+ if 'accept' in data and not self.user.email == self.instance.email:
+ raise ValidationError('This user has another email address than '
+ 'the one that received the invitation.')
return data
def is_accepted(self):
diff --git a/euth/memberships/views.py b/euth/memberships/views.py
--- a/euth/memberships/views.py
+++ b/euth/memberships/views.py
@@ -43,6 +43,11 @@
slug_field = 'token'
slug_url_kwarg = 'invite_token'
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs.update({'user': self.request.user})
+ return kwargs
+
def form_valid(self, form):
if form.is_accepted():
form.instance.accept(self.request.user)
| {"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,10 +12,17 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n+ def __init__(self, user=None, **kwargs):\n+ super().__init__(**kwargs)\n+ self.user = user\n+\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n+ if 'accept' in data and not self.user.email == self.instance.email:\n+ raise ValidationError('This user has another email address than '\n+ 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,6 +43,11 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs.update({'user': self.request.user})\n+ return kwargs\n+\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Wrong user in project\nWhen I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n 
try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import 
ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def __init__(self, user=None, **kwargs):\n super().__init__(**kwargs)\n self.user = user\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n if 'accept' in data and not self.user.email == self.instance.email:\n raise ValidationError('This user has another email address than '\n 'the one that received the invitation.')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]} | 1,213 | 316 |
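The fix in this row passes the requesting user into `InviteForm` so that accepting an invite can be validated against the invitation's email address. A hedged sketch of that flow, with `invite` and `current_user` as stand-ins for real model instances rather than code from the repository, might look like:

```python
# Illustrative only: mirrors how the patched InviteView feeds the current user
# into the form via get_form_kwargs(); the arguments are hypothetical objects.
from euth.memberships.forms import InviteForm

def accept_invite(invite, current_user):
    form = InviteForm(
        user=current_user,          # injected by the patched get_form_kwargs()
        data={"accept": "accept"},  # simulates pressing the accept button
        instance=invite,            # the Invite row looked up by its token
    )
    if form.is_valid():
        invite.accept(current_user)
        return True
    # clean() now fails when current_user.email != invite.email, which is the
    # "wrong user clicked the emailed link" scenario described in the issue.
    return False
```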
gh_patches_debug_3941 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1431 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PORT] Create test for waterfall cancellation telemetry
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3314
For https://github.com/microsoft/botbuilder-js/issues/1619
# Changed projects
* Microsoft.Bot.Builder.Dialogs.Tests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4
5 import uuid
6 from typing import Coroutine
7 from botbuilder.core import TurnContext
8 from botbuilder.schema import ActivityTypes
9 from .dialog_reason import DialogReason
10 from .dialog import Dialog
11 from .dialog_turn_result import DialogTurnResult
12 from .dialog_context import DialogContext
13 from .dialog_instance import DialogInstance
14 from .waterfall_step_context import WaterfallStepContext
15
16
17 class WaterfallDialog(Dialog):
18 PersistedOptions = "options"
19 StepIndex = "stepIndex"
20 PersistedValues = "values"
21 PersistedInstanceId = "instanceId"
22
23 def __init__(self, dialog_id: str, steps: [Coroutine] = None):
24 super(WaterfallDialog, self).__init__(dialog_id)
25 if not steps:
26 self._steps = []
27 else:
28 if not isinstance(steps, list):
29 raise TypeError("WaterfallDialog(): steps must be list of steps")
30 self._steps = steps
31
32 def add_step(self, step):
33 """
34 Adds a new step to the waterfall.
35 :param step: Step to add
36 :return: Waterfall dialog for fluent calls to `add_step()`.
37 """
38 if not step:
39 raise TypeError("WaterfallDialog.add_step(): step cannot be None.")
40
41 self._steps.append(step)
42 return self
43
44 async def begin_dialog(
45 self, dialog_context: DialogContext, options: object = None
46 ) -> DialogTurnResult:
47
48 if not dialog_context:
49 raise TypeError("WaterfallDialog.begin_dialog(): dc cannot be None.")
50
51 # Initialize waterfall state
52 state = dialog_context.active_dialog.state
53
54 instance_id = uuid.uuid1().__str__()
55 state[self.PersistedOptions] = options
56 state[self.PersistedValues] = {}
57 state[self.PersistedInstanceId] = instance_id
58
59 properties = {}
60 properties["DialogId"] = self.id
61 properties["InstanceId"] = instance_id
62 self.telemetry_client.track_event("WaterfallStart", properties)
63
64 # Run first stepkinds
65 return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)
66
67 async def continue_dialog( # pylint: disable=unused-argument,arguments-differ
68 self,
69 dialog_context: DialogContext = None,
70 reason: DialogReason = None,
71 result: object = NotImplementedError(),
72 ) -> DialogTurnResult:
73 if not dialog_context:
74 raise TypeError("WaterfallDialog.continue_dialog(): dc cannot be None.")
75
76 if dialog_context.context.activity.type != ActivityTypes.message:
77 return Dialog.end_of_turn
78
79 return await self.resume_dialog(
80 dialog_context,
81 DialogReason.ContinueCalled,
82 dialog_context.context.activity.text,
83 )
84
85 async def resume_dialog(
86 self, dialog_context: DialogContext, reason: DialogReason, result: object
87 ):
88 if dialog_context is None:
89 raise TypeError("WaterfallDialog.resume_dialog(): dc cannot be None.")
90
91 # Increment step index and run step
92 state = dialog_context.active_dialog.state
93
94 # Future Me:
95 # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871
96 # for hints.
97 return await self.run_step(
98 dialog_context, state[self.StepIndex] + 1, reason, result
99 )
100
101 async def end_dialog( # pylint: disable=unused-argument
102 self, context: TurnContext, instance: DialogInstance, reason: DialogReason
103 ) -> None:
104 if reason is DialogReason.CancelCalled:
105 index = instance.state[self.StepIndex]
106 step_name = self.get_step_name(index)
107 instance_id = str(instance.state[self.PersistedInstanceId])
108 properties = {
109 "DialogId": self.id,
110 "StepName": step_name,
111 "InstanceId": instance_id,
112 }
113 self.telemetry_client.track_event("WaterfallCancel", properties)
114 else:
115 if reason is DialogReason.EndCalled:
116
117 instance_id = str(instance.state[self.PersistedInstanceId])
118 properties = {"DialogId": self.id, "InstanceId": instance_id}
119 self.telemetry_client.track_event("WaterfallComplete", properties)
120
121 return
122
123 async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
124 step_name = self.get_step_name(step_context.index)
125 instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])
126 properties = {
127 "DialogId": self.id,
128 "StepName": step_name,
129 "InstanceId": instance_id,
130 }
131 self.telemetry_client.track_event("WaterfallStep", properties)
132 return await self._steps[step_context.index](step_context)
133
134 async def run_step(
135 self,
136 dialog_context: DialogContext,
137 index: int,
138 reason: DialogReason,
139 result: object,
140 ) -> DialogTurnResult:
141 if not dialog_context:
142 raise TypeError(
143 "WaterfallDialog.run_steps(): dialog_context cannot be None."
144 )
145 if index < len(self._steps):
146 # Update persisted step index
147 state = dialog_context.active_dialog.state
148 state[self.StepIndex] = index
149
150 # Create step context
151 options = state[self.PersistedOptions]
152 values = state[self.PersistedValues]
153 step_context = WaterfallStepContext(
154 self, dialog_context, options, values, index, reason, result
155 )
156 return await self.on_step(step_context)
157
158 # End of waterfall so just return any result to parent
159 return await dialog_context.end_dialog(result)
160
161 def get_step_name(self, index: int) -> str:
162 """
163 Give the waterfall step a unique name
164 """
165 step_name = self._steps[index].__qualname__
166
167 if not step_name or ">" in step_name:
168 step_name = f"Step{index + 1}of{len(self._steps)}"
169
170 return step_name
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py
@@ -164,7 +164,7 @@
"""
step_name = self._steps[index].__qualname__
- if not step_name or ">" in step_name:
+ if not step_name or step_name.endswith("<lambda>"):
step_name = f"Step{index + 1}of{len(self._steps)}"
return step_name
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n@@ -164,7 +164,7 @@\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n \r\n- if not step_name or \">\" in step_name:\r\n+ if not step_name or step_name.endswith(\"<lambda>\"):\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n \r\n return step_name\n", "issue": "[PORT] Create test for waterfall cancellation telemetry\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3314\n\nFor https://github.com/microsoft/botbuilder-js/issues/1619\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs.Tests\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\n\r\nimport uuid\r\nfrom typing import Coroutine\r\nfrom botbuilder.core import TurnContext\r\nfrom botbuilder.schema import ActivityTypes\r\nfrom .dialog_reason import DialogReason\r\nfrom .dialog import Dialog\r\nfrom .dialog_turn_result import DialogTurnResult\r\nfrom .dialog_context import DialogContext\r\nfrom .dialog_instance import DialogInstance\r\nfrom .waterfall_step_context import WaterfallStepContext\r\n\r\n\r\nclass WaterfallDialog(Dialog):\r\n PersistedOptions = \"options\"\r\n StepIndex = \"stepIndex\"\r\n PersistedValues = \"values\"\r\n PersistedInstanceId = \"instanceId\"\r\n\r\n def __init__(self, dialog_id: str, steps: [Coroutine] = None):\r\n super(WaterfallDialog, self).__init__(dialog_id)\r\n if not steps:\r\n self._steps = []\r\n else:\r\n if not isinstance(steps, list):\r\n raise TypeError(\"WaterfallDialog(): steps must be list of steps\")\r\n self._steps = steps\r\n\r\n def add_step(self, step):\r\n \"\"\"\r\n Adds a new step to the waterfall.\r\n :param step: Step to add\r\n :return: Waterfall dialog for fluent calls to `add_step()`.\r\n \"\"\"\r\n if not step:\r\n raise TypeError(\"WaterfallDialog.add_step(): step cannot be None.\")\r\n\r\n self._steps.append(step)\r\n return self\r\n\r\n async def begin_dialog(\r\n self, dialog_context: DialogContext, options: object = None\r\n ) -> DialogTurnResult:\r\n\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.begin_dialog(): dc cannot be None.\")\r\n\r\n # Initialize waterfall state\r\n state = dialog_context.active_dialog.state\r\n\r\n instance_id = uuid.uuid1().__str__()\r\n state[self.PersistedOptions] = options\r\n state[self.PersistedValues] = {}\r\n state[self.PersistedInstanceId] = instance_id\r\n\r\n properties = {}\r\n properties[\"DialogId\"] = self.id\r\n properties[\"InstanceId\"] = instance_id\r\n self.telemetry_client.track_event(\"WaterfallStart\", properties)\r\n\r\n # Run first stepkinds\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n\r\n async def continue_dialog( # pylint: disable=unused-argument,arguments-differ\r\n self,\r\n dialog_context: DialogContext = None,\r\n reason: DialogReason = None,\r\n result: object = NotImplementedError(),\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.continue_dialog(): dc cannot be None.\")\r\n\r\n if dialog_context.context.activity.type != ActivityTypes.message:\r\n return Dialog.end_of_turn\r\n\r\n return await 
self.resume_dialog(\r\n dialog_context,\r\n DialogReason.ContinueCalled,\r\n dialog_context.context.activity.text,\r\n )\r\n\r\n async def resume_dialog(\r\n self, dialog_context: DialogContext, reason: DialogReason, result: object\r\n ):\r\n if dialog_context is None:\r\n raise TypeError(\"WaterfallDialog.resume_dialog(): dc cannot be None.\")\r\n\r\n # Increment step index and run step\r\n state = dialog_context.active_dialog.state\r\n\r\n # Future Me:\r\n # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871\r\n # for hints.\r\n return await self.run_step(\r\n dialog_context, state[self.StepIndex] + 1, reason, result\r\n )\r\n\r\n async def end_dialog( # pylint: disable=unused-argument\r\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\r\n ) -> None:\r\n if reason is DialogReason.CancelCalled:\r\n index = instance.state[self.StepIndex]\r\n step_name = self.get_step_name(index)\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallCancel\", properties)\r\n else:\r\n if reason is DialogReason.EndCalled:\r\n\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\"DialogId\": self.id, \"InstanceId\": instance_id}\r\n self.telemetry_client.track_event(\"WaterfallComplete\", properties)\r\n\r\n return\r\n\r\n async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n step_name = self.get_step_name(step_context.index)\r\n instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallStep\", properties)\r\n return await self._steps[step_context.index](step_context)\r\n\r\n async def run_step(\r\n self,\r\n dialog_context: DialogContext,\r\n index: int,\r\n reason: DialogReason,\r\n result: object,\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\r\n \"WaterfallDialog.run_steps(): dialog_context cannot be None.\"\r\n )\r\n if index < len(self._steps):\r\n # Update persisted step index\r\n state = dialog_context.active_dialog.state\r\n state[self.StepIndex] = index\r\n\r\n # Create step context\r\n options = state[self.PersistedOptions]\r\n values = state[self.PersistedValues]\r\n step_context = WaterfallStepContext(\r\n self, dialog_context, options, values, index, reason, result\r\n )\r\n return await self.on_step(step_context)\r\n\r\n # End of waterfall so just return any result to parent\r\n return await dialog_context.end_dialog(result)\r\n\r\n def get_step_name(self, index: int) -> str:\r\n \"\"\"\r\n Give the waterfall step a unique name\r\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n\r\n if not step_name or \">\" in step_name:\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n\r\n return step_name\r\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\n\r\nimport uuid\r\nfrom typing import Coroutine\r\nfrom botbuilder.core import TurnContext\r\nfrom botbuilder.schema import ActivityTypes\r\nfrom .dialog_reason import DialogReason\r\nfrom .dialog import Dialog\r\nfrom .dialog_turn_result import DialogTurnResult\r\nfrom .dialog_context import DialogContext\r\nfrom .dialog_instance import DialogInstance\r\nfrom .waterfall_step_context import WaterfallStepContext\r\n\r\n\r\nclass WaterfallDialog(Dialog):\r\n PersistedOptions = \"options\"\r\n StepIndex = \"stepIndex\"\r\n PersistedValues = \"values\"\r\n PersistedInstanceId = \"instanceId\"\r\n\r\n def __init__(self, dialog_id: str, steps: [Coroutine] = None):\r\n super(WaterfallDialog, self).__init__(dialog_id)\r\n if not steps:\r\n self._steps = []\r\n else:\r\n if not isinstance(steps, list):\r\n raise TypeError(\"WaterfallDialog(): steps must be list of steps\")\r\n self._steps = steps\r\n\r\n def add_step(self, step):\r\n \"\"\"\r\n Adds a new step to the waterfall.\r\n :param step: Step to add\r\n :return: Waterfall dialog for fluent calls to `add_step()`.\r\n \"\"\"\r\n if not step:\r\n raise TypeError(\"WaterfallDialog.add_step(): step cannot be None.\")\r\n\r\n self._steps.append(step)\r\n return self\r\n\r\n async def begin_dialog(\r\n self, dialog_context: DialogContext, options: object = None\r\n ) -> DialogTurnResult:\r\n\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.begin_dialog(): dc cannot be None.\")\r\n\r\n # Initialize waterfall state\r\n state = dialog_context.active_dialog.state\r\n\r\n instance_id = uuid.uuid1().__str__()\r\n state[self.PersistedOptions] = options\r\n state[self.PersistedValues] = {}\r\n state[self.PersistedInstanceId] = instance_id\r\n\r\n properties = {}\r\n properties[\"DialogId\"] = self.id\r\n properties[\"InstanceId\"] = instance_id\r\n self.telemetry_client.track_event(\"WaterfallStart\", properties)\r\n\r\n # Run first stepkinds\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n\r\n async def continue_dialog( # pylint: disable=unused-argument,arguments-differ\r\n self,\r\n dialog_context: DialogContext = None,\r\n reason: DialogReason = None,\r\n result: object = NotImplementedError(),\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.continue_dialog(): dc cannot be None.\")\r\n\r\n if dialog_context.context.activity.type != ActivityTypes.message:\r\n return Dialog.end_of_turn\r\n\r\n return await self.resume_dialog(\r\n dialog_context,\r\n DialogReason.ContinueCalled,\r\n dialog_context.context.activity.text,\r\n )\r\n\r\n async def resume_dialog(\r\n self, dialog_context: DialogContext, reason: DialogReason, result: object\r\n ):\r\n if dialog_context is None:\r\n raise TypeError(\"WaterfallDialog.resume_dialog(): dc cannot be None.\")\r\n\r\n # Increment step index and run step\r\n state = dialog_context.active_dialog.state\r\n\r\n # Future Me:\r\n # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871\r\n # for hints.\r\n return await self.run_step(\r\n dialog_context, state[self.StepIndex] + 1, reason, result\r\n )\r\n\r\n async def end_dialog( # pylint: disable=unused-argument\r\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\r\n ) -> None:\r\n if reason is DialogReason.CancelCalled:\r\n index = instance.state[self.StepIndex]\r\n step_name = self.get_step_name(index)\r\n instance_id = 
str(instance.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallCancel\", properties)\r\n else:\r\n if reason is DialogReason.EndCalled:\r\n\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\"DialogId\": self.id, \"InstanceId\": instance_id}\r\n self.telemetry_client.track_event(\"WaterfallComplete\", properties)\r\n\r\n return\r\n\r\n async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n step_name = self.get_step_name(step_context.index)\r\n instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallStep\", properties)\r\n return await self._steps[step_context.index](step_context)\r\n\r\n async def run_step(\r\n self,\r\n dialog_context: DialogContext,\r\n index: int,\r\n reason: DialogReason,\r\n result: object,\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\r\n \"WaterfallDialog.run_steps(): dialog_context cannot be None.\"\r\n )\r\n if index < len(self._steps):\r\n # Update persisted step index\r\n state = dialog_context.active_dialog.state\r\n state[self.StepIndex] = index\r\n\r\n # Create step context\r\n options = state[self.PersistedOptions]\r\n values = state[self.PersistedValues]\r\n step_context = WaterfallStepContext(\r\n self, dialog_context, options, values, index, reason, result\r\n )\r\n return await self.on_step(step_context)\r\n\r\n # End of waterfall so just return any result to parent\r\n return await dialog_context.end_dialog(result)\r\n\r\n def get_step_name(self, index: int) -> str:\r\n \"\"\"\r\n Give the waterfall step a unique name\r\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n\r\n if not step_name or step_name.endswith(\"<lambda>\"):\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n\r\n return step_name\r\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py"}]} | 2,036 | 163 |
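The one-line change in this row narrows the fallback naming so that only anonymous lambda steps fall back to the generated `Step{N}of{M}` name. A quick, self-contained illustration of why the old `">" in step_name` test was too broad is sketched below; the step functions here are invented, and real waterfall steps would be coroutines.

```python
# Demonstrates the __qualname__ shapes the patched check has to distinguish.
def top_level_step(step_context):
    return None

def make_step():
    def nested_step(step_context):   # __qualname__ == "make_step.<locals>.nested_step"
        return None
    return nested_step

lambda_step = lambda step_context: None  # __qualname__ == "<lambda>"

for step in (top_level_step, make_step(), lambda_step):
    name = step.__qualname__
    # Old check ('>' in name) also fired for nested_step because of "<locals>".
    # New check (name.endswith("<lambda>")) only fires for the anonymous lambda.
    print(name, name.endswith("<lambda>"))
```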
gh_patches_debug_10304 | rasdani/github-patches | git_diff | google__openhtf-393 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nicer failure mode for misuse of StoreInModule.
See #389.
> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?
> @grybmadsci
Document util/argv.py
There are no docs on how to use this module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openhtf/util/argv.py`
Content:
```
1 """Utilities for handling command line arguments.
2
3 StoreInModule:
4 Enables emulating a gflags-esque API (flag affects global value), but one
5 doesn't necessarily need to use flags to set values.
6
7 Example usage:
8 DEFAULT_VALUE = 0
9 ARG_PARSER = argv.ModuleParser()
10 ARG_PARSER.add_argument(
11 '--override-value', action=argv.StoreInModule,
12 default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)
13
14 Then in an entry point (main() function), use that parser as a parent:
15 parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])
16 parser.parse_args()
17 """
18
19 import argparse
20
21
22 def ModuleParser():
23 return argparse.ArgumentParser(add_help=False)
24
25
26 class StoreInModule(argparse.Action):
27
28 def __init__(self, *args, **kwargs):
29 self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)
30 proxy_cls = kwargs.pop('proxy', None)
31 if proxy_cls is not None:
32 self._proxy = proxy_cls(*args, **kwargs)
33 super(StoreInModule, self).__init__(*args, **kwargs)
34
35 def __call__(self, parser, namespace, values, option_string=None):
36 if hasattr(self, '_proxy'):
37 values = self._proxy(parser, namespace, values)
38 base, mod = self._tgt_mod.rsplit('.', 1)
39 module = getattr(__import__(base, fromlist=[mod]), mod)
40 setattr(module, self._tgt_attr, values)
41
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py
--- a/openhtf/util/argv.py
+++ b/openhtf/util/argv.py
@@ -35,7 +35,10 @@
def __call__(self, parser, namespace, values, option_string=None):
if hasattr(self, '_proxy'):
values = self._proxy(parser, namespace, values)
- base, mod = self._tgt_mod.rsplit('.', 1)
- module = getattr(__import__(base, fromlist=[mod]), mod)
+ if '.' in self._tgt_mod:
+ base, mod = self._tgt_mod.rsplit('.', 1)
+ module = getattr(__import__(base, fromlist=[mod]), mod)
+ else:
+ module = __import__(self._tgt_mod)
setattr(module, self._tgt_attr, values)
| {"golden_diff": "diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py\n--- a/openhtf/util/argv.py\n+++ b/openhtf/util/argv.py\n@@ -35,7 +35,10 @@\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n- base, mod = self._tgt_mod.rsplit('.', 1)\n- module = getattr(__import__(base, fromlist=[mod]), mod)\n+ if '.' in self._tgt_mod:\n+ base, mod = self._tgt_mod.rsplit('.', 1)\n+ module = getattr(__import__(base, fromlist=[mod]), mod)\n+ else:\n+ module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n", "issue": "Nicer failure mode for misuse of StoreInModule.\nSee #389.\n\n> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?\n> @grybmadsci\n\nDocument util/argv.py\nThere are no docs on how to use this module.\n\n", "before_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}], "after_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = 
self._proxy(parser, namespace, values)\n if '.' in self._tgt_mod:\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n else:\n module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}]} | 761 | 193 |
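The patch above makes `StoreInModule` tolerate a target whose module path has no package prefix (no dot before the attribute's module). A hedged usage sketch, following the docstring in the record and assuming a flat, importable module named `settings` plus an invented flag name, might look like:

```python
# Illustrative only: "settings" is a hypothetical top-level module assumed to be
# importable; the flag name and default value are also invented.
import argparse
from openhtf.util import argv

DEFAULT_PORT = 8888
ARG_PARSER = argv.ModuleParser()
ARG_PARSER.add_argument(
    '--port', action=argv.StoreInModule,
    default=DEFAULT_PORT,
    # "settings" has no package prefix, so the target module path contains no dot;
    # the patched __call__ imports it directly instead of failing on the rsplit unpack.
    target='settings.DEFAULT_PORT')

parser = argparse.ArgumentParser(parents=[ARG_PARSER])
parser.parse_args(['--port', '9000'])  # sets settings.DEFAULT_PORT to the string "9000"
```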
gh_patches_debug_39557 | rasdani/github-patches | git_diff | geopandas__geopandas-1093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a show_versions function
Similarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples.
It could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/tools/_show_versions.py`
Content:
```
1 import platform
2 import sys
3 import importlib
4
5
6 def _get_sys_info():
7 """System information
8
9 Returns
10 -------
11 sys_info : dict
12 system and Python version information
13 """
14 python = sys.version.replace('\n', ' ')
15
16 blob = [
17 ("python", python),
18 ('executable', sys.executable),
19 ("machine", platform.platform()),
20 ]
21
22 return dict(blob)
23
24
25 def _get_deps_info():
26 """Overview of the installed version of main dependencies
27
28 Returns
29 -------
30 deps_info: dict
31 version information on relevant Python libraries
32 """
33 deps = [
34 "geopandas",
35 "pandas",
36 "fiona",
37 "osgeo.gdal",
38 "numpy",
39 "shapely",
40 "rtree",
41 "pyproj",
42 "matplotlib",
43 "mapclassify",
44 "pysal",
45 "geopy",
46 "psycopg2",
47 "descartes"
48 ]
49
50 def get_version(module):
51 return module.__version__
52
53 deps_info = {}
54
55 for modname in deps:
56 try:
57 if modname in sys.modules:
58 mod = sys.modules[modname]
59 else:
60 mod = importlib.import_module(modname)
61 ver = get_version(mod)
62 deps_info[modname] = ver
63 except ImportError:
64 deps_info[modname] = None
65 except AttributeError:
66 deps_info[modname] = None
67
68 return deps_info
69
70
71 def show_versions():
72 """
73 Print system information and installed module versions.
74
75 Example
76 -------
77 > python -c "import geopandas; geopandas.show_versions()"
78 """
79 sys_info = _get_sys_info()
80 deps_info = _get_deps_info()
81
82 maxlen = max(len(x) for x in deps_info)
83 tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
84 print("\nSYSTEM INFO")
85 print("-----------")
86 for k, stat in sys_info.items():
87 print(tpl.format(k=k, stat=stat))
88 print("\nPYTHON DEPENDENCIES")
89 print("-------------------")
90 for k, stat in deps_info.items():
91 print(tpl.format(k=k, stat=stat))
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py
--- a/geopandas/tools/_show_versions.py
+++ b/geopandas/tools/_show_versions.py
@@ -22,6 +22,53 @@
return dict(blob)
+def _get_C_info():
+ """Information on system PROJ, GDAL, GEOS
+ Returns
+ -------
+ c_info: dict
+ system PROJ information
+ """
+ try:
+ import pyproj
+ from pyproj.exceptions import DataDirError
+ proj = pyproj.proj_version_str
+ try:
+ proj_dir = pyproj.datadir.get_data_dir()
+ except DataDirError:
+ proj_dir = None
+ except Exception:
+ proj = None
+ proj_dir = None
+
+ try:
+ import shapely._buildcfg
+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)
+ geos_dir = shapely._buildcfg.geos_library_path
+ except Exception:
+ geos = None
+ geos_dir = None
+
+ try:
+ import fiona
+ gdal = fiona.env.get_gdal_release_name()
+ gdal_dir = fiona.env.GDALDataFinder().search()
+ except Exception:
+ gdal = None
+ gdal_dir = None
+
+ blob = [
+ ("GEOS", geos),
+ ("GEOS lib", geos_dir),
+ ("GDAL", gdal),
+ ("GDAL dir", gdal_dir),
+ ("PROJ", proj),
+ ("PROJ data dir", proj_dir)
+ ]
+
+ return dict(blob)
+
+
def _get_deps_info():
"""Overview of the installed version of main dependencies
@@ -34,7 +81,6 @@
"geopandas",
"pandas",
"fiona",
- "osgeo.gdal",
"numpy",
"shapely",
"rtree",
@@ -43,8 +89,7 @@
"mapclassify",
"pysal",
"geopy",
- "psycopg2",
- "descartes"
+ "psycopg2"
]
def get_version(module):
@@ -60,9 +105,7 @@
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
- except ImportError:
- deps_info[modname] = None
- except AttributeError:
+ except Exception:
deps_info[modname] = None
return deps_info
@@ -78,6 +121,7 @@
"""
sys_info = _get_sys_info()
deps_info = _get_deps_info()
+ proj_info = _get_C_info()
maxlen = max(len(x) for x in deps_info)
tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
@@ -85,6 +129,10 @@
print("-----------")
for k, stat in sys_info.items():
print(tpl.format(k=k, stat=stat))
+ print("\nGEOS, GDAL, PROJ INFO")
+ print("---------------------")
+ for k, stat in proj_info.items():
+ print(tpl.format(k=k, stat=stat))
print("\nPYTHON DEPENDENCIES")
print("-------------------")
for k, stat in deps_info.items():
| {"golden_diff": "diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py\n--- a/geopandas/tools/_show_versions.py\n+++ b/geopandas/tools/_show_versions.py\n@@ -22,6 +22,53 @@\n return dict(blob)\n \n \n+def _get_C_info():\n+ \"\"\"Information on system PROJ, GDAL, GEOS\n+ Returns\n+ -------\n+ c_info: dict\n+ system PROJ information\n+ \"\"\"\n+ try:\n+ import pyproj\n+ from pyproj.exceptions import DataDirError\n+ proj = pyproj.proj_version_str\n+ try:\n+ proj_dir = pyproj.datadir.get_data_dir()\n+ except DataDirError:\n+ proj_dir = None\n+ except Exception:\n+ proj = None\n+ proj_dir = None\n+\n+ try:\n+ import shapely._buildcfg\n+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n+ geos_dir = shapely._buildcfg.geos_library_path\n+ except Exception:\n+ geos = None\n+ geos_dir = None\n+\n+ try:\n+ import fiona\n+ gdal = fiona.env.get_gdal_release_name()\n+ gdal_dir = fiona.env.GDALDataFinder().search()\n+ except Exception:\n+ gdal = None\n+ gdal_dir = None\n+\n+ blob = [\n+ (\"GEOS\", geos),\n+ (\"GEOS lib\", geos_dir),\n+ (\"GDAL\", gdal),\n+ (\"GDAL dir\", gdal_dir),\n+ (\"PROJ\", proj),\n+ (\"PROJ data dir\", proj_dir)\n+ ]\n+\n+ return dict(blob)\n+\n+\n def _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n \n@@ -34,7 +81,6 @@\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n- \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n@@ -43,8 +89,7 @@\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n- \"psycopg2\",\n- \"descartes\"\n+ \"psycopg2\"\n ]\n \n def get_version(module):\n@@ -60,9 +105,7 @@\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n- except ImportError:\n- deps_info[modname] = None\n- except AttributeError:\n+ except Exception:\n deps_info[modname] = None\n \n return deps_info\n@@ -78,6 +121,7 @@\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n+ proj_info = _get_C_info()\n \n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n@@ -85,6 +129,10 @@\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n+ print(\"\\nGEOS, GDAL, PROJ INFO\")\n+ print(\"---------------------\")\n+ for k, stat in proj_info.items():\n+ print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n", "issue": "Add a show_versions function\nSimilarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples. 
\r\n\r\nIt could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.\n", "before_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\",\n \"descartes\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except ImportError:\n deps_info[modname] = None\n except AttributeError:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}], "after_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_C_info():\n \"\"\"Information on system PROJ, GDAL, GEOS\n Returns\n -------\n c_info: dict\n system PROJ information\n \"\"\"\n try:\n import pyproj\n from pyproj.exceptions import DataDirError\n proj = pyproj.proj_version_str\n try:\n proj_dir = pyproj.datadir.get_data_dir()\n except DataDirError:\n proj_dir = None\n except Exception:\n proj = None\n proj_dir = None\n\n try:\n import shapely._buildcfg\n geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n geos_dir = shapely._buildcfg.geos_library_path\n except Exception:\n geos = None\n geos_dir = None\n\n try:\n import fiona\n gdal = fiona.env.get_gdal_release_name()\n gdal_dir = fiona.env.GDALDataFinder().search()\n except Exception:\n gdal = None\n gdal_dir = None\n\n blob = [\n (\"GEOS\", geos),\n (\"GEOS lib\", geos_dir),\n (\"GDAL\", gdal),\n (\"GDAL dir\", gdal_dir),\n (\"PROJ\", proj),\n (\"PROJ data dir\", proj_dir)\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n 
\"geopandas\",\n \"pandas\",\n \"fiona\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except Exception:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n proj_info = _get_C_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nGEOS, GDAL, PROJ INFO\")\n print(\"---------------------\")\n for k, stat in proj_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}]} | 1,000 | 795 |
gh_patches_debug_26059 | rasdani/github-patches | git_diff | DDMAL__CantusDB-192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make a test case to test the permissions
we have implemented a lot of different restrictions to views. create a unit test to automate the testing process.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/sequence.py`
Content:
```
1 from django.views.generic import DetailView, ListView, UpdateView
2 from main_app.models import Sequence
3 from django.db.models import Q
4 from main_app.forms import SequenceEditForm
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.contrib import messages
7 from django.contrib.auth.mixins import UserPassesTestMixin
8 from django.core.exceptions import PermissionDenied
9 from django.http import Http404
10
11
12
13 class SequenceDetailView(DetailView):
14 """
15 Displays a single Sequence object. Accessed with ``sequences/<int:pk>``
16 """
17
18 model = Sequence
19 context_object_name = "sequence"
20 template_name = "sequence_detail.html"
21
22 def get_context_data(self, **kwargs):
23
24 # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page
25 sequence = self.get_object()
26 source = sequence.source
27 if (source.public is False) and (not self.request.user.is_authenticated):
28 raise PermissionDenied()
29
30 context = super().get_context_data(**kwargs)
31 context["concordances"] = Sequence.objects.filter(
32 cantus_id=self.get_object().cantus_id
33 ).order_by("siglum")
34 return context
35
36
37 class SequenceListView(ListView):
38 """
39 Displays a list of Sequence objects. Accessed with ``sequences/``
40 """
41
42 model = Sequence
43 paginate_by = 100
44 context_object_name = "sequences"
45 template_name = "sequence_list.html"
46
47 def get_queryset(self):
48 queryset = super().get_queryset()
49 q_obj_filter = Q(source__visible=True)
50 q_obj_filter &= Q(source__public=True)
51
52 if self.request.GET.get("incipit"):
53 incipit = self.request.GET.get("incipit")
54 q_obj_filter &= Q(incipit__icontains=incipit)
55 if self.request.GET.get("siglum"):
56 siglum = self.request.GET.get("siglum")
57 q_obj_filter &= Q(siglum__icontains=siglum)
58 if self.request.GET.get("cantus_id"):
59 cantus_id = self.request.GET.get("cantus_id")
60 q_obj_filter &= Q(cantus_id__icontains=cantus_id)
61
62 return queryset.filter(q_obj_filter).order_by("siglum", "sequence")
63
64 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
65 template_name = "sequence_edit.html"
66 model = Sequence
67 form_class = SequenceEditForm
68 pk_url_kwarg = "sequence_id"
69
70 def form_valid(self, form):
71 messages.success(
72 self.request,
73 "Sequence updated successfully!",
74 )
75 return super().form_valid(form)
76
77 def test_func(self):
78 user = self.request.user
79 sequence_id = self.kwargs.get(self.pk_url_kwarg)
80 try:
81 sequence = Sequence.objects.get(id=sequence_id)
82 except:
83 raise Http404("This sequence does not exist")
84 # find the source of this sequence
85 source = sequence.source
86 # checks if the user is an editor or a proofreader,
87 # and if the user is given privilege to edit this source and thus, it's sequences
88 is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
89 can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
90 # checks if the user is a project manager (they should have the privilege to edit any sequence)
91 is_project_manager = user.groups.filter(name="project manager").exists()
92 # checks if the user is a contributor,
93 # and if the user is the creator of this source
94 # (they should only have the privilege to edit sequences in a source they have created)
95 is_contributor = user.groups.filter(name="contributor").exists()
96
97 if ((is_editor_proofreader and can_edit_sequences_in_source)
98 or (is_project_manager)
99 or (is_contributor and source.created_by == user)):
100 return True
101 else:
102 return False
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py
--- a/django/cantusdb_project/main_app/views/sequence.py
+++ b/django/cantusdb_project/main_app/views/sequence.py
@@ -76,27 +76,10 @@
def test_func(self):
user = self.request.user
- sequence_id = self.kwargs.get(self.pk_url_kwarg)
- try:
- sequence = Sequence.objects.get(id=sequence_id)
- except:
- raise Http404("This sequence does not exist")
- # find the source of this sequence
- source = sequence.source
- # checks if the user is an editor or a proofreader,
- # and if the user is given privilege to edit this source and thus, it's sequences
- is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
# checks if the user is a project manager (they should have the privilege to edit any sequence)
is_project_manager = user.groups.filter(name="project manager").exists()
- # checks if the user is a contributor,
- # and if the user is the creator of this source
- # (they should only have the privilege to edit sequences in a source they have created)
- is_contributor = user.groups.filter(name="contributor").exists()
-
- if ((is_editor_proofreader and can_edit_sequences_in_source)
- or (is_project_manager)
- or (is_contributor and source.created_by == user)):
+
+ if is_project_manager:
return True
else:
return False
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -76,27 +76,10 @@\n \n def test_func(self):\n user = self.request.user\n- sequence_id = self.kwargs.get(self.pk_url_kwarg)\n- try:\n- sequence = Sequence.objects.get(id=sequence_id)\n- except:\n- raise Http404(\"This sequence does not exist\")\n- # find the source of this sequence\n- source = sequence.source\n- # checks if the user is an editor or a proofreader,\n- # and if the user is given privilege to edit this source and thus, it's sequences\n- is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n- # checks if the user is a contributor,\n- # and if the user is the creator of this source \n- # (they should only have the privilege to edit sequences in a source they have created)\n- is_contributor = user.groups.filter(name=\"contributor\").exists()\n-\n- if ((is_editor_proofreader and can_edit_sequences_in_source) \n- or (is_project_manager) \n- or (is_contributor and source.created_by == user)):\n+\n+ if is_project_manager:\n return True\n else:\n return False\n", "issue": "make a test case to test the permissions\nwe have implemented a lot of different restrictions to views. create a unit test to automate the testing process.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. 
Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n sequence_id = self.kwargs.get(self.pk_url_kwarg)\n try:\n sequence = Sequence.objects.get(id=sequence_id)\n except:\n raise Http404(\"This sequence does not exist\")\n # find the source of this sequence\n source = sequence.source\n # checks if the user is an editor or a proofreader,\n # and if the user is given privilege to edit this source and thus, it's sequences\n is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is a contributor,\n # and if the user is the creator of this source \n # (they should only have the privilege to edit sequences in a source they have created)\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if ((is_editor_proofreader and can_edit_sequences_in_source) \n or (is_project_manager) \n or (is_contributor and source.created_by == user)):\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. 
Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n\n if is_project_manager:\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}]} | 1,360 | 392 |
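The patched `test_func` above restricts sequence editing to the "project manager" group. A unit test along the lines the issue asks for might look like the sketch below; the URL path and the `create_user()` arguments are assumptions, since neither the project's URL configuration nor its custom user model appears in this entry.

```python
# Hedged sketch of the permission test requested in the issue.
# Assumptions: the edit view is reachable at /edit-sequence/<sequence_id>
# (a real test should use reverse() with the project's URL name), and
# create_user() accepts the standard username/password arguments.
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.test import TestCase


class SequenceEditPermissionTest(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username="regular", password="pass"
        )

    def test_non_project_manager_gets_403(self):
        self.client.login(username="regular", password="pass")
        # UserPassesTestMixin raises PermissionDenied for authenticated users
        # who fail test_func, so a 403 is expected here.
        response = self.client.get("/edit-sequence/1", follow=True)
        self.assertEqual(response.status_code, 403)

    def test_project_manager_is_not_blocked(self):
        Group.objects.create(name="project manager").user_set.add(self.user)
        self.client.login(username="regular", password="pass")
        response = self.client.get("/edit-sequence/1", follow=True)
        # a project manager passes test_func; any 404 here would come from the
        # missing Sequence object, not from the permission check
        self.assertNotEqual(response.status_code, 403)
```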
gh_patches_debug_38134 | rasdani/github-patches | git_diff | bridgecrewio__checkov-536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id
In the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration
```
encrypted - (Optional) Whether the volume should be encrypted or not.
Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.
(Default: false).
```
The relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check:
https://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class SecurityGroupRuleDescription(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure every security groups rule has a description"
8 id = "CKV_AWS_23"
9 supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',
10 'aws_elasticache_security_group', 'aws_redshift_security_group']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
13
14 def scan_resource_conf(self, conf):
15 """
16 Looks for description at security group rules :
17 https://www.terraform.io/docs/providers/aws/r/security_group.html
18 :param conf: aws_security_group configuration
19 :return: <CheckResult>
20 """
21 if 'description' in conf.keys():
22 if conf['description']:
23 return CheckResult.PASSED
24 egress_result = self.check_rule(rule_type='egress', conf=conf)
25 ingress_result = self.check_rule(rule_type='ingress', conf=conf)
26 if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:
27 return CheckResult.PASSED
28 return CheckResult.FAILED
29
30 def check_rule(self, rule_type, conf):
31 if rule_type in conf.keys():
32 for rule in conf[rule_type]:
33 if isinstance(rule, dict):
34 if 'description' not in rule.keys() or not rule['description']:
35 return CheckResult.FAILED
36 return CheckResult.PASSED
37
38
39 check = SecurityGroupRuleDescription()
40
```
Path: `checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class LaunchConfigurationEBSEncryption(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure all data stored in the Launch configuration EBS is securely encrypted"
8 id = "CKV_AWS_8"
9 supported_resources = ['aws_launch_configuration', 'aws_instance']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "*_block_device/[0]/encrypted"
15
16 def scan_resource_conf(self, conf):
17 """
18 Looks for encryption configuration at launch configuration:
19 https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html
20 :param conf: aws_launch_configuration configuration
21 :return: <CheckResult>
22 """
23 have_root_block = 0
24 for key in conf.keys():
25 if "block_device" in key and "ephemeral" not in key:
26 if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]:
27 return CheckResult.FAILED
28 if "root_block_device" in key:
29 # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
30 have_root_block = 1
31 if have_root_block == 0:
32 return CheckResult.FAILED
33
34 return CheckResult.PASSED
35
36
37 check = LaunchConfigurationEBSEncryption()
38
```
Path: `checkov/common/runners/runner_registry.py`
Content:
```
1 import json
2 import logging
3 from abc import abstractmethod
4
5
6 class RunnerRegistry(object):
7 runners = []
8 scan_reports = []
9 banner = ""
10
11 def __init__(self, banner, runner_filter, *runners):
12 self.logger = logging.getLogger(__name__)
13 self.runner_filter = runner_filter
14 self.runners = runners
15 self.banner = banner
16 self.filter_runner_framework()
17 self.scan_reports = []
18
19 @abstractmethod
20 def extract_entity_details(self, entity):
21 raise NotImplementedError()
22
23 def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):
24 for runner in self.runners:
25 scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
26 runner_filter=self.runner_filter)
27 RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)
28 self.scan_reports.append(scan_report)
29 return self.scan_reports
30
31 def print_reports(self, scan_reports, args):
32 if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only":
33 print(f"{self.banner}\n")
34 exit_codes = []
35 report_jsons = []
36 for report in scan_reports:
37 if not report.is_empty():
38 if args.output == "json":
39 report_jsons.append(report.get_dict())
40 elif args.output == "junitxml":
41 report.print_junit_xml()
42 elif args.output == 'github_failed_only':
43 report.print_failed_github_md()
44 else:
45 report.print_console(is_quiet=args.quiet)
46 exit_codes.append(report.get_exit_code(args.soft_fail))
47 if args.output == "json":
48 if len(report_jsons) == 1:
49 print(json.dumps(report_jsons[0], indent=4))
50 else:
51 print(json.dumps(report_jsons, indent=4))
52 exit_code = 1 if 1 in exit_codes else 0
53 exit(exit_code)
54
55 def filter_runner_framework(self):
56 if self.runner_filter.framework == 'all':
57 return
58 for runner in self.runners:
59 if runner.check_type == self.runner_filter.framework:
60 self.runners = [runner]
61 return
62
63 @staticmethod
64 def enrich_report_with_guidelines(scan_report, guidelines):
65 for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:
66 if record.check_id in guidelines:
67 record.set_guideline(guidelines[record.check_id])
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py
--- a/checkov/common/runners/runner_registry.py
+++ b/checkov/common/runners/runner_registry.py
@@ -29,7 +29,11 @@
return self.scan_reports
def print_reports(self, scan_reports, args):
- if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only":
+ if args.output not in (
+ "github_failed_only",
+ "json",
+ "junitxml",
+ ):
print(f"{self.banner}\n")
exit_codes = []
report_jsons = []
diff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
--- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
+++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
@@ -20,15 +20,25 @@
:param conf: aws_launch_configuration configuration
:return: <CheckResult>
"""
- have_root_block = 0
for key in conf.keys():
- if "block_device" in key and "ephemeral" not in key:
- if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]:
+ if (
+ "block_device" in key
+ and
+ "ephemeral" not in key
+ ):
+ if (
+ isinstance(conf[key][0], dict)
+ and
+ conf[key][0].get("encrypted") != [True]
+ and
+ # If present, the encrypted flag will be determined by the snapshot
+ # Note: checkov does not know if snapshot is encrypted, so we default to PASSED
+ not conf[key][0].get("snapshot_id")
+ ):
return CheckResult.FAILED
- if "root_block_device" in key:
- # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
- have_root_block = 1
- if have_root_block == 0:
+
+ # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
+ if "root_block_device" not in conf.keys():
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
--- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
+++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py
@@ -6,8 +6,13 @@
def __init__(self):
name = "Ensure every security groups rule has a description"
id = "CKV_AWS_23"
- supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',
- 'aws_elasticache_security_group', 'aws_redshift_security_group']
+ supported_resource = [
+ 'aws_security_group',
+ 'aws_security_group_rule',
+ 'aws_db_security_group',
+ 'aws_elasticache_security_group',
+ 'aws_redshift_security_group',
+ ]
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
| {"golden_diff": "diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py\n--- a/checkov/common/runners/runner_registry.py\n+++ b/checkov/common/runners/runner_registry.py\n@@ -29,7 +29,11 @@\n return self.scan_reports\n \n def print_reports(self, scan_reports, args):\n- if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n+ if args.output not in (\n+ \"github_failed_only\",\n+ \"json\",\n+ \"junitxml\",\n+ ):\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\ndiff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n--- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n@@ -20,15 +20,25 @@\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n- have_root_block = 0\n for key in conf.keys():\n- if \"block_device\" in key and \"ephemeral\" not in key:\n- if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n+ if (\n+ \"block_device\" in key\n+ and\n+ \"ephemeral\" not in key\n+ ):\n+ if (\n+ isinstance(conf[key][0], dict)\n+ and\n+ conf[key][0].get(\"encrypted\") != [True]\n+ and\n+ # If present, the encrypted flag will be determined by the snapshot\n+ # Note: checkov does not know if snapshot is encrypted, so we default to PASSED\n+ not conf[key][0].get(\"snapshot_id\")\n+ ):\n return CheckResult.FAILED\n- if \"root_block_device\" in key:\n- # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n- have_root_block = 1\n- if have_root_block == 0: \n+\n+ # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n+ if \"root_block_device\" not in conf.keys():\n return CheckResult.FAILED\n \n return CheckResult.PASSED\ndiff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n--- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n+++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n@@ -6,8 +6,13 @@\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n- supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n- 'aws_elasticache_security_group', 'aws_redshift_security_group']\n+ supported_resource = [\n+ 'aws_security_group',\n+ 'aws_security_group_rule',\n+ 'aws_db_security_group',\n+ 'aws_elasticache_security_group',\n+ 'aws_redshift_security_group',\n+ ]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n", "issue": "LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id\nIn the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration\r\n\r\n```\r\nencrypted - (Optional) Whether the volume should be encrypted or not. 
\r\nDo not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.\r\n(Default: false).\r\n```\r\nThe relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass SecurityGroupRuleDescription(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n 'aws_elasticache_security_group', 'aws_redshift_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for description at security group rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'description' in conf.keys():\n if conf['description']:\n return CheckResult.PASSED\n egress_result = self.check_rule(rule_type='egress', conf=conf)\n ingress_result = self.check_rule(rule_type='ingress', conf=conf)\n if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def check_rule(self, rule_type, conf):\n if rule_type in conf.keys():\n for rule in conf[rule_type]:\n if isinstance(rule, dict):\n if 'description' not in rule.keys() or not rule['description']:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupRuleDescription()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass LaunchConfigurationEBSEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the Launch configuration EBS is securely encrypted\"\n id = \"CKV_AWS_8\"\n supported_resources = ['aws_launch_configuration', 'aws_instance']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"*_block_device/[0]/encrypted\"\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for encryption configuration at launch configuration:\n https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n have_root_block = 0\n for key in conf.keys():\n if \"block_device\" in key and \"ephemeral\" not in key:\n if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n return CheckResult.FAILED\n if \"root_block_device\" in key:\n # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n 
have_root_block = 1\n if have_root_block == 0: \n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = LaunchConfigurationEBSEncryption()\n", "path": "checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py"}, {"content": "import json\nimport logging\nfrom abc import abstractmethod\n\n\nclass RunnerRegistry(object):\n runners = []\n scan_reports = []\n banner = \"\"\n\n def __init__(self, banner, runner_filter, *runners):\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = runners\n self.banner = banner\n self.filter_runner_framework()\n self.scan_reports = []\n\n @abstractmethod\n def extract_entity_details(self, entity):\n raise NotImplementedError()\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):\n for runner in self.runners:\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter)\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n self.scan_reports.append(scan_report)\n return self.scan_reports\n\n def print_reports(self, scan_reports, args):\n if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n for report in scan_reports:\n if not report.is_empty():\n if args.output == \"json\":\n report_jsons.append(report.get_dict())\n elif args.output == \"junitxml\":\n report.print_junit_xml()\n elif args.output == 'github_failed_only':\n report.print_failed_github_md()\n else:\n report.print_console(is_quiet=args.quiet)\n exit_codes.append(report.get_exit_code(args.soft_fail))\n if args.output == \"json\":\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n exit_code = 1 if 1 in exit_codes else 0\n exit(exit_code)\n\n def filter_runner_framework(self):\n if self.runner_filter.framework == 'all':\n return\n for runner in self.runners:\n if runner.check_type == self.runner_filter.framework:\n self.runners = [runner]\n return\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report, guidelines):\n for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n", "path": "checkov/common/runners/runner_registry.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass SecurityGroupRuleDescription(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n supported_resource = [\n 'aws_security_group',\n 'aws_security_group_rule',\n 'aws_db_security_group',\n 'aws_elasticache_security_group',\n 'aws_redshift_security_group',\n ]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for description at security group rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'description' in conf.keys():\n if conf['description']:\n return CheckResult.PASSED\n egress_result = 
self.check_rule(rule_type='egress', conf=conf)\n ingress_result = self.check_rule(rule_type='ingress', conf=conf)\n if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def check_rule(self, rule_type, conf):\n if rule_type in conf.keys():\n for rule in conf[rule_type]:\n if isinstance(rule, dict):\n if 'description' not in rule.keys() or not rule['description']:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupRuleDescription()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass LaunchConfigurationEBSEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the Launch configuration EBS is securely encrypted\"\n id = \"CKV_AWS_8\"\n supported_resources = ['aws_launch_configuration', 'aws_instance']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"*_block_device/[0]/encrypted\"\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for encryption configuration at launch configuration:\n https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n for key in conf.keys():\n if (\n \"block_device\" in key\n and\n \"ephemeral\" not in key\n ):\n if (\n isinstance(conf[key][0], dict)\n and\n conf[key][0].get(\"encrypted\") != [True]\n and\n # If present, the encrypted flag will be determined by the snapshot\n # Note: checkov does not know if snapshot is encrypted, so we default to PASSED\n not conf[key][0].get(\"snapshot_id\")\n ):\n return CheckResult.FAILED\n\n # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n if \"root_block_device\" not in conf.keys():\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = LaunchConfigurationEBSEncryption()\n", "path": "checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py"}, {"content": "import json\nimport logging\nfrom abc import abstractmethod\n\n\nclass RunnerRegistry(object):\n runners = []\n scan_reports = []\n banner = \"\"\n\n def __init__(self, banner, runner_filter, *runners):\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = runners\n self.banner = banner\n self.filter_runner_framework()\n self.scan_reports = []\n\n @abstractmethod\n def extract_entity_details(self, entity):\n raise NotImplementedError()\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):\n for runner in self.runners:\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter)\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n self.scan_reports.append(scan_report)\n return self.scan_reports\n\n def print_reports(self, scan_reports, args):\n if args.output not in (\n \"github_failed_only\",\n \"json\",\n \"junitxml\",\n ):\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n for report in scan_reports:\n if not 
report.is_empty():\n if args.output == \"json\":\n report_jsons.append(report.get_dict())\n elif args.output == \"junitxml\":\n report.print_junit_xml()\n elif args.output == 'github_failed_only':\n report.print_failed_github_md()\n else:\n report.print_console(is_quiet=args.quiet)\n exit_codes.append(report.get_exit_code(args.soft_fail))\n if args.output == \"json\":\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n exit_code = 1 if 1 in exit_codes else 0\n exit(exit_code)\n\n def filter_runner_framework(self):\n if self.runner_filter.framework == 'all':\n return\n for runner in self.runners:\n if runner.check_type == self.runner_filter.framework:\n self.runners = [runner]\n return\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report, guidelines):\n for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n", "path": "checkov/common/runners/runner_registry.py"}]} | 2,038 | 804 |
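To see the `snapshot_id` handling from the golden diff in isolation, the patched check can be driven directly with hand-built configurations. The dict shapes below mirror the list-wrapped values the check itself indexes (`conf[key][0]`, `[True]`), but they are hand-written for illustration rather than produced by checkov's HCL parser, and the asserts describe the patched behaviour only.

```python
# Sketch: exercising the patched check directly (requires checkov with the
# patch above installed). The conf dicts are hand-built approximations of
# parser output, not real parser results.
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.LaunchConfigurationEBSEncryption import check

conf_with_snapshot = {
    "image_id": ["ami-123456"],
    "root_block_device": [{"snapshot_id": ["snap-0abc"], "encrypted": [False]}],
}
# snapshot_id present: encryption is determined by the snapshot, so no failure
assert check.scan_resource_conf(conf_with_snapshot) == CheckResult.PASSED

conf_unencrypted = {
    "image_id": ["ami-123456"],
    "root_block_device": [{"encrypted": [False]}],
}
# no snapshot_id and not encrypted: still flagged
assert check.scan_resource_conf(conf_unencrypted) == CheckResult.FAILED
```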
gh_patches_debug_12143 | rasdani/github-patches | git_diff | google__turbinia-294 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Programatically setting config file
I am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.
I tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.
What do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/config/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Basic Turbinia config."""
16
17 from __future__ import unicode_literals
18
19 import imp
20 import itertools
21 import logging
22 import os
23 import sys
24
25 log = logging.getLogger('turbinia')
26
27 # Look for config files with these names
28 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']
29 # Look in homedir first, then /etc/turbinia, and finally in the source
30 # config dir for config files
31 CONFIGPATH = [
32 os.path.expanduser('~'),
33 '/etc/turbinia',
34 os.path.dirname(os.path.abspath(__file__))]
35 # Config vars that we expect to exist in the configuration
36 CONFIGVARS = [
37 # Turbinia Config
38 'TASK_MANAGER',
39 'LOG_FILE',
40 'LOCK_FILE',
41 'OUTPUT_DIR',
42 'SLEEP_TIME',
43 'SINGLE_RUN',
44 'MOUNT_DIR_PREFIX',
45 'SHARED_FILESYSTEM',
46 # TODO(aarontp): Move this to the recipe config when it's available.
47 'DEBUG_TASKS',
48 # GCE CONFIG
49 'PROJECT',
50 'ZONE',
51 'TURBINIA_REGION',
52 'BUCKET_NAME',
53 'PSQ_TOPIC',
54 'PUBSUB_TOPIC',
55 'GCS_OUTPUT_PATH',
56 'STATE_MANAGER',
57 'INSTANCE_ID',
58 # REDIS CONFIG
59 'REDIS_HOST',
60 'REDIS_PORT',
61 'REDIS_DB',
62 # Celery config
63 'CELERY_BROKER',
64 'CELERY_BACKEND',
65 'KOMBU_BROKER',
66 'KOMBU_CHANNEL',
67 'KOMBU_DURABLE',]
68 # Environment variable to look for path data in
69 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
70
71 CONFIG = None
72
73
74 class TurbiniaConfigException(Exception):
75 """Exception for Turbinia configuration."""
76 pass
77
78
79 def LoadConfig():
80 """Finds Turbinia config file and loads it."""
81 # TODO(aarontp): Find way to not require global var here. Maybe a singleton
82 # pattern on the config class.
83 # pylint: disable=global-statement
84 global CONFIG
85 if CONFIG:
86 return CONFIG
87
88 if ENVCONFIGVAR in os.environ:
89 CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
90
91 config_file = None
92 # Load first file found
93 for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
94 if os.path.exists(os.path.join(_dir, _file)):
95 config_file = os.path.join(_dir, _file)
96 break
97
98 if config_file is None:
99 raise TurbiniaConfigException('No config files found')
100
101 log.info('Loading config from {0:s}'.format(config_file))
102 _config = imp.load_source('config', config_file)
103 _config.configSource = config_file
104 ValidateAndSetConfig(_config)
105 CONFIG = _config
106 return _config
107
108
109 def ValidateAndSetConfig(_config):
110 """Makes sure that the config has the vars loaded and set in the module."""
111 # TODO(aarontp): Allow for non-mandatory config options
112 for var in CONFIGVARS:
113 if not hasattr(_config, var):
114 raise TurbiniaConfigException(
115 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))
116 if getattr(_config, var) is None:
117 raise TurbiniaConfigException(
118 'Config attribute {0:s}:{1:s} is not set'.format(
119 _config.configSource, var))
120
121 # Set the attribute in the current module
122 setattr(sys.modules[__name__], var, getattr(_config, var))
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py
--- a/turbinia/config/__init__.py
+++ b/turbinia/config/__init__.py
@@ -85,12 +85,16 @@
if CONFIG:
return CONFIG
+ # If the environment variable is set, take precedence over the pre-defined
+ # CONFIGPATHs.
+ configpath = CONFIGPATH
if ENVCONFIGVAR in os.environ:
- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
+ configpath = os.environ[ENVCONFIGVAR].split(':')
+
config_file = None
# Load first file found
- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
+ for _dir, _file in itertools.product(configpath, CONFIGFILES):
if os.path.exists(os.path.join(_dir, _file)):
config_file = os.path.join(_dir, _file)
break
| {"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -85,12 +85,16 @@\n if CONFIG:\n return CONFIG\n \n+ # If the environment variable is set, take precedence over the pre-defined\n+ # CONFIGPATHs.\n+ configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n+ configpath = os.environ[ENVCONFIGVAR].split(':')\n+\n \n config_file = None\n # Load first file found\n- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n+ for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n", "issue": "Programatically setting config file\nI am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.\r\n\r\nI tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.\r\n\r\nWhat do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config file 
and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n if ENVCONFIGVAR in os.environ:\n CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config 
file and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}]} | 1,598 | 220 |
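With the precedence change in the golden diff, pointing `TURBINIA_CONFIG_PATH` at a directory is enough to isolate a test configuration even when `~/.turbiniarc` or `/etc/turbinia` exists. A rough sketch of how a test might lean on that is shown below; the generated config values are dummies that only satisfy the presence check in `ValidateAndSetConfig()`.

```python
# Rough sketch only: relies on the patched LoadConfig() giving
# TURBINIA_CONFIG_PATH precedence over the pre-defined CONFIGPATH entries.
import os
import tempfile

from turbinia import config

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, 'turbinia_config.py'), 'w') as conf_file:
        # dummy values; validation only checks that each var exists and is set
        for var in config.CONFIGVARS:
            conf_file.write("{0:s} = 'test'\n".format(var))
    os.environ['TURBINIA_CONFIG_PATH'] = tmp
    config.CONFIG = None          # clear the module-level cache to force a reload
    loaded = config.LoadConfig()  # now reads only from the temp directory
```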